seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
22265028586 | from mensajes import separador_chats
from usuarios import inicio_sesion
from parametros import VOLVER_FRASE, ABANDONAR_FRASE
from datetime import datetime, date
def lista_grupos():
    """Read grupos.csv and return each line split into a [group, user] list."""
    with open('grupos.csv', 'rt') as archivo_grupos:
        filas = archivo_grupos.readlines()
    return [fila.strip().split(",") for fila in filas]
def dic_grupos():
    # Build a dict mapping each group name to its list of users, plus the
    # list of group names. Assumes grupos.csv rows are contiguous by group.
    grupos = {}
    lista_grupos_para_dic = lista_grupos()
    variable = ""    # name of the group currently being collected
    temporal = []    # users accumulated for the current group
    nombres_grupos = []
    for elemento in lista_grupos_para_dic:
        if variable == "" or variable != elemento[0]:
            # Group boundary: flush the previous group's users under the
            # previous name, then start collecting the new group. The very
            # first iteration stores a dummy "" entry, discarded below.
            grupos[variable] = temporal
            nombres_grupos.append(variable)
            variable = elemento[0]
            temporal = []
            temporal.append(elemento[1])
        else:
            temporal.append(elemento[1])
    # Flush the last group (its name is never appended to nombres_grupos).
    grupos[variable] = temporal
    # NOTE(review): the two pops drop the leading "" placeholder plus the
    # first real entry -- presumably the second pop removes a CSV header
    # row; confirm against the grupos.csv format, since otherwise the first
    # and last group names both go missing from nombres_grupos.
    nombres_grupos.pop(0)
    nombres_grupos.pop(0)
    return grupos, nombres_grupos
def grupos_usuario(ingresado):
    """Return the names of every group that lists the given user as a member."""
    diccionario_grupos, nombres_grupos = dic_grupos()
    # One entry per matching membership row, preserving dict order (and any
    # duplicates), exactly as the nested scan would produce.
    return [nombre
            for nombre, miembros in diccionario_grupos.items()
            for miembro in miembros
            if miembro == ingresado]
def chat_grupo(grupo):
    """Return every stored chat row whose group field matches `grupo`."""
    todos_los_chats = separador_chats('nada', 'grupo')
    return [mensaje for mensaje in todos_los_chats if mensaje[2] == grupo]
def mostrar_mensaje(grupo):
    # Print every message of the group's chat, in stored order.
    chat = chat_grupo(grupo)
    for elemento in chat:
        # NOTE(review): `chat` is the list returned by chat_grupo(), so it can
        # never equal the string "vacio"; this branch looks dead. The check was
        # probably meant to run on `elemento` (or before the loop) -- confirm
        # what separador_chats() yields for an empty chat.
        if chat == "vacio":
            print("Inicia la conversacion con este grupo")
            break
        else:
            # Row layout: [kind, sender, group, timestamp, text]
            print(f"[{elemento[3]}] De {elemento[1]}: '{elemento[4]}'")
    return
def eliminar_usuario(ingresado, grupo):
    # Remove the user from the group in grupos.csv, then append a system
    # message to mensajes.csv announcing that the user left, and reprint
    # the group's chat.
    lista = lista_grupos()
    for elemento in lista:
        # NOTE(review): popping from `lista` while iterating it is fragile;
        # it works here only because at most one matching row is expected.
        if elemento[0] == grupo and elemento[1] == ingresado:
            lista.pop(lista.index(elemento))
    # Rewrite grupos.csv without the removed membership row.
    with open('grupos.csv', 'w') as archivo_grupos:
        for elemento in lista:
            escribir = f"{elemento[0]},{elemento[1]}"+"\n"
            archivo_grupos.write(escribir)
    # Append the "user left" system message with a date-time stamp.
    with open('mensajes.csv', 'a') as archivo_chats:
        archivo_chats.write("\n")
        hora = datetime.now()
        fecha = datetime.today()
        envio = str(fecha.strftime("%Y/%m/%d")) + " " + str(hora.strftime("%H:%M:%S"))
        archivo_chats.write( \
            f"grupo,Sistema,{grupo},{envio},El usuario {ingresado} ha abandonado el grupo")
    mostrar_mensaje(grupo)
    return
def nuevo_mensaje(ingresado, grupo):
    # Prompt for a reply in the group's chat. The special phrases route back
    # to the group menu or leave the group; any other text is appended to
    # mensajes.csv and the function recurses so the user can keep chatting.
    print(f"Escribe una respuesta ¿o ingresa '{VOLVER_FRASE}' para regresar al menu de contactos")
    print(f"Si deseas abandonar el grupo escribe '{ABANDONAR_FRASE}', se notificará al grupo")
    texto = input()
    if texto == VOLVER_FRASE:
        return seleccion_grupo(ingresado)
    elif texto == ABANDONAR_FRASE:
        eliminar_usuario(ingresado, grupo)
        # "menu" tells the caller (menus.py) to return to the group menu.
        return "menu"
    else:
        # Timestamp the message and append it as a new CSV row.
        hora = datetime.now()
        fecha = datetime.today()
        envio = str(fecha.strftime("%Y/%m/%d")) + " " + str(hora.strftime("%H:%M:%S"))
        mensaje = f"grupo,{ingresado},{grupo},{envio},{texto}"
        with open('mensajes.csv', 'a') as archivo_chats:
            archivo_chats.write("\n")
            archivo_chats.write(mensaje)
        mostrar_mensaje(grupo)
        # Recurse to keep the conversation loop going.
        return nuevo_mensaje(ingresado, grupo)
def seleccion_grupo(ingresado):
    # Interactive menu: list the user's groups, validate the numeric choice,
    # then show the selected group's chat and hand off to nuevo_mensaje().
    print("***** Ver Grupos *****")
    print("Selecciona un grupo para ver tus conversaciones, o 0 para volver atras:")
    grupos_ingresado = grupos_usuario(ingresado)
    for x in range(len(grupos_ingresado)):
        print(f"[{x+1}] {grupos_ingresado[x]}")
    print("[0] Volver")
    seleccion = input("Ingrese el numero del usuario seleccionado: ")
    if seleccion == "0":
        # tells menus.py to run menu_grupos()
        return "menu"
    elif seleccion.isdigit() == False:
        print("Solo puedes ingresar numeros")
        return seleccion_grupo(ingresado)
    elif int(seleccion) < 1 or int(seleccion) > len(grupos_ingresado):
        print("El numero ingresado no es valido")
        return seleccion_grupo(ingresado)
    else:
        if grupos_ingresado[int(seleccion)-1] == "vacio":
            print("No tienes ningun grupo en tu lista, crea uno para comenzar a chatear")
            return seleccion_grupo(ingresado)
        else:
            print(f"chat del grupo '{grupos_ingresado[int(seleccion)-1]}'")
            # grupos_ingresado[int(seleccion)-1] -> selected group; the -1
            # undoes the 1-based numbering printed above.
            mostrar_mensaje(grupos_ingresado[int(seleccion)-1])
            accion = nuevo_mensaje(ingresado, grupos_ingresado[int(seleccion)-1])
            return accion
def crear_grupo(ingresado):
    # Interactive group creation: validate the group name, then a
    # semicolon-separated member list (creator must be first and every
    # member must exist), and finally append the memberships to grupos.csv.
    nombre = input("Ingresa el nombre que tendra el grupo (minimo un caracter): ")
    if len(nombre) < 1:
        print("el nombre ingresado es de menos de un caracter")
        return
    # Reject names already used by an existing group.
    desechable, grupos_existentes = dic_grupos()
    for elemento in grupos_existentes:
        if elemento == nombre:
            print("Este nombre ya esta en uso, ingrese otro")
            return
    print("Ahora debe registrar a los usuarios que formaran parte del grupo")
    print("Para esto deberas incluirte a ti mismo y seguir el siguiente formato:")
    print("tú;usuario2;usuario3;.....;usuarioN")
    print("Como minimo el grupo debe tener dos usuarios incluyendote")
    usuarios = input()
    correcto_1 = False   # True when the input contains at least one ';'
    correcto_2 = 0       # number of members confirmed to exist
    for elemento in usuarios:
        if elemento == ";":
            correcto_1 = True
            break
    if correcto_1 == True:
        usuarios = usuarios.split(";")
        if len(usuarios) < 2:
            print("No cumples con la cantidad minima de usuarios")
            return
        else:
            if usuarios[0] == ingresado:
                # NOTE(review): inicio_sesion() presumably returns True iff
                # the username exists -- confirm against usuarios.py.
                for elemento in usuarios:
                    existente = inicio_sesion(elemento)
                    if existente == True:
                        correcto_2 += 1
                    else:
                        print(f"El nombre de usuario {elemento} no existe")
                        return
            else:
                print("No te ingresaste primero en la lista")
                return
    else:
        print("El formato de la lista de usuarios no coincide con el especificado")
        return
    # All members validated: persist one membership row per user.
    if correcto_2 == len(usuarios):
        with open('grupos.csv', 'a') as archivo_grupos:
            for elemento in usuarios:
                archivo_grupos.write(f"{nombre},{elemento}"+'\n')
        print("Grupo creado con exito, regresando al menu")
        return
    else:
        print("Ocurrio un error al crear el grupo")
return | Alzvil/IIC2233-Progra-Avanzada-Tareas-2021-1 | Tareas/T0/grupos.py | grupos.py | py | 6,884 | python | es | code | 0 | github-code | 36 |
23268928678 | #import sys, os
#sys.path.append(os.path.abspath(""))
from functionsDB.ConnectionDB import abrirconexion, cerrarconexion
from functionsDB.entity.comentario import Comentario
from datetime import datetime
def altacomentario(comentario):
    """Insert a new Comentario row into the comentario table.

    :param comentario: Comentario entity providing fecha, hora, contenido,
        usuario and producto via its getters.
    """
    cur, con = abrirconexion()
    # Parameterized query instead of string concatenation: comment content
    # comes from users, so building the SQL by hand was injectable.
    # NOTE(review): assumes a DB-API driver using the '%s' paramstyle
    # (pymysql/psycopg2 style) -- confirm against functionsDB.ConnectionDB.
    sql = ("insert into comentario(fecha,hora,contenido,usuario,producto) "
           "values(%s,%s,%s,%s,%s)")
    cur.execute(sql, (comentario.get_fecha(), comentario.get_hora(),
                      comentario.get_contenido(), comentario.get_usuario(),
                      str(comentario.get_producto())))
    cerrarconexion(cur, con)
def bajacomentario(comentario):
    """Delete the comentario row identified by the entity's codigo.

    :param comentario: Comentario entity; only get_codigo() is used.
    """
    cur, con = abrirconexion()
    # Parameterized query instead of string concatenation (SQL injection risk).
    # NOTE(review): assumes a '%s'-paramstyle DB-API driver -- confirm against
    # functionsDB.ConnectionDB.
    sql = "delete from comentario where codigo=%s"
    cur.execute(sql, (str(comentario.get_codigo()),))
    cerrarconexion(cur, con)
def modificarcomentario(comentario):
    """Update every editable field of the comentario row matching codigo.

    :param comentario: Comentario entity providing the new field values and
        the codigo of the row to update.
    """
    cur, con = abrirconexion()
    # Parameterized query instead of string concatenation: contenido/usuario
    # are user-supplied, so hand-built SQL was injectable.
    # NOTE(review): assumes a '%s'-paramstyle DB-API driver -- confirm against
    # functionsDB.ConnectionDB.
    sql = ("update comentario set fecha=%s, hora=%s, contenido=%s, "
           "usuario=%s, producto=%s where codigo=%s")
    cur.execute(sql, (comentario.get_fecha(), comentario.get_hora(),
                      comentario.get_contenido(), comentario.get_usuario(),
                      str(comentario.get_producto()),
                      str(comentario.get_codigo())))
    cerrarconexion(cur, con)
def listadocomentarios():
    # Return every comment joined with its author's photo URL, as a list of
    # dicts keyed by column name, with fecha/hora normalized to strings.
    results = []
    cur,con = abrirconexion()
    sql = "select c.codigo,c.fecha,c.hora,c.contenido,c.usuario,c.producto,u.urlfoto from comentario c, usuario u where u.email=c.usuario"
    cur.execute(sql)
    # Column names in result order, used to zip each row into a dict.
    columns = list(map(lambda x: x[0], cur.description))
    for row in cur.fetchall():
        results.append(dict(zip(columns, row)))
        # Convert the driver's date/time objects into plain formatted strings.
        # NOTE(review): the original (flattened) source is ambiguous about
        # whether these lines sit inside the loop (formatting every row) or
        # after it (formatting only the last row) -- confirm intent.
        fecha = results[-1]['fecha']
        hora = results[-1]['hora']
        fecha = datetime(fecha.year, fecha.month, fecha.day)
        hora = datetime(fecha.year, fecha.month, fecha.day, hora.hour, hora.minute, hora.second)
        results[-1]['fecha'] = fecha.strftime('%Y-%m-%d')
        results[-1]['hora'] = hora.strftime('%H:%M:%S')
    cerrarconexion(cur,con)
    return results
"""
now = datetime.now()
coment = Comentario()
coment.set_codigo(12)
coment.set_fecha(str(now.year)+'-'+str(now.month)+'-'+str(now.day))
coment.set_hora(str(20)+':'+str(12)+':'+str(now.second))
coment.set_contenido('mensaje de error')
coment.set_usuario("exe.gye@gmail.com")
coment.set_producto(2)
"""
#altacomentario(coment)
#modificarcomentario(coment)
#bajacomentario(coment)
| exegonzalez/Taller-de-Integracion | App/src/functionsDB/ABMComentario.py | ABMComentario.py | py | 2,295 | python | es | code | 1 | github-code | 36 |
11545489040 | from logging import INFO, getLogger, StreamHandler, Formatter, DEBUG, INFO
from os import environ
from urlparse import urlparse
from gunicorn.glogging import Logger
from log4mongo.handlers import MongoHandler, MongoFormatter
# parse the MONGOLAB_URI environment variable to get the auth/db info
MONGOLAB_URI_PARSED = urlparse( environ[ 'MONGOLAB_URI' ] )
# Connection keyword arguments shared by every MongoHandler below.
MONGOLAB_CONF_DICT = dict(
    host = MONGOLAB_URI_PARSED.hostname,
    port = MONGOLAB_URI_PARSED.port,
    database_name = MONGOLAB_URI_PARSED.path[ 1: ],  # strip the leading '/'
    username = MONGOLAB_URI_PARSED.username,
    password = MONGOLAB_URI_PARSED.password
)
# determine if we are running in production (e.g., on Heroku), or locally
PRODUCTION = environ[ 'VERSION' ] == 'production'
# setup the root logger so that application logs go to mongolab
def setup_logging( name ):
    """Attach a DEBUG-level handler to the named logger: MongoLab in
    production, a formatted stream handler locally."""
    root_logger = getLogger( name )
    if PRODUCTION:
        chosen_handler = MongoHandler( level = DEBUG, collection = 'application-log', **MONGOLAB_CONF_DICT )
        chosen_handler.setFormatter( MongoFormatter() )
    else:
        chosen_handler = StreamHandler()
        chosen_handler.setLevel( DEBUG )
        chosen_handler.setFormatter( Formatter( '%(asctime)s [%(process)d] [%(levelname)s/APPLICATION] %(message)s', '%Y.%m:%d %H:%M:%S' ) )
    root_logger.setLevel( DEBUG )
    root_logger.addHandler( chosen_handler )
# define a logger so that gunicorn sends access and error logs to mongolab
class GunicornLogger( Logger ):
    """Gunicorn logger that routes access (and, in production, error) logs
    to MongoLab collections; locally it streams formatted access logs."""
    def __init__( self, cfg ):
        super( GunicornLogger, self ).__init__( cfg )
        if PRODUCTION:
            access_handler = MongoHandler( level = INFO, collection = 'access-log', **MONGOLAB_CONF_DICT )
            error_handler = MongoHandler( level = INFO, collection = 'error-log', **MONGOLAB_CONF_DICT )
            access_handler.setFormatter( MongoFormatter() )
            error_handler.setFormatter( MongoFormatter() )
            self.error_log.addHandler( error_handler )
            self.error_log.setLevel( INFO )
        else:
            # Locally only the access log gets a (stream) handler; errors use
            # gunicorn's defaults.
            access_handler = StreamHandler()
            access_handler.setFormatter( Formatter( '%(asctime)s [%(process)d] [%(levelname)s/ACCESS] %(message)s', '%Y.%m:%d %H:%M:%S' ) )
        # Access handler is attached in both branches.
        self.access_log.addHandler( access_handler )
        self.access_log.setLevel( INFO )
| mapio/heroku-log4mongo | heroku-log4mongo/logger.py | logger.py | py | 2,096 | python | en | code | 5 | github-code | 36 |
939034143 | from datetime import datetime
from src.app import db, app
import uuid
from src.models.mixins import BaseMixin
from src.helpers import *
from sqlalchemy import exc
class BookRating(BaseMixin, db.Model):
    """SQLAlchemy model for the rating a reading list gives to a book.

    Each (book_id, list_id) pair may carry at most one rating (enforced by
    the unique constraint below). CRUD helpers are exposed as static methods
    returning plain dicts suitable for JSON responses.
    """
    __tablename__ = "book_ratings"
    # Primary key: random hex uuid generated at insert time.
    rating_id = db.Column(db.String(50), primary_key=True, default=lambda: uuid.uuid1().hex)
    book_id = db.Column(db.String(50), db.ForeignKey("books.book_id"), nullable=False)
    list_id = db.Column(db.String(50), db.ForeignKey("reading_lists.list_id"), nullable=False)
    rating = db.Column(db.Integer, nullable=False)
    notes = db.Column(db.String(500))
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    updated_at = db.Column(db.DateTime)
    # Validation rules consumed by BaseMixin.validate_and_sanitize().
    _validations_ = {
        "book_id": {"type": "string", "required": True, "min_length": 32, "max_length": 32},
        "list_id": {"type": "string", "required": True, "min_length": 32, "max_length": 32},
        "rating": {"type": "integer", "required": True, "min_value": 0, "max_value": 5},
        "notes": {"type": "string", "required": False, "min_length": 0, "max_length": 500},
    }
    # Columns callers may not set on create / update respectively.
    _restrict_in_creation_ = ["rating_id", "created_at", "updated_at"]
    _restrict_in_update_ = ["rating_id", "created_at", "book_id", "list_id"]
    # One rating per (book, reading list) pair.
    __table_args__ = (db.UniqueConstraint('book_id', 'list_id', name='uq_book_list'),)

    @staticmethod
    def create_a_rating(data):
        """
        Create a new rating
        :param data: [object] contains rating info in key value pair
        :return [dict]
        """
        app.logger.info('Preparing to create a new rating')
        new_rating = BookRating()
        # Copy only columns that are not restricted at creation time.
        allowed_columns = list_diff(BookRating().columns_list(), BookRating()._restrict_in_creation_)
        for column in allowed_columns:
            if column in data:
                setattr(new_rating, column, data.get(column))
        app.logger.debug('Populated new rating object with provided data')
        # Check if data is valid
        result = new_rating.validate_and_sanitize(BookRating()._restrict_in_creation_)
        if result.get("errors"):
            app.logger.error('Validation and sanitization failed for new rating')
            app.logger.debug(f'Error details: {result["errors"]}')
            return {"error": result["errors"]}
        try:
            db.session.add(new_rating)
            db.session.flush()
            db.session.commit()
            app.logger.info(f'New rating created successfully with id {new_rating.rating_id}')
            return {"rating_id": str(new_rating.rating_id)}
        except exc.IntegrityError as e:
            db.session.rollback()
            # Extract the offending column/value detail from the DB error
            # message (e.g. duplicate (book_id, list_id) pair).
            err = e.orig.diag.message_detail.rsplit(',', 1)[-1]
            app.logger.error('Integrity error occurred while creating new rating')
            app.logger.debug(f'Error details: {err.replace(")", "")}')
            return {"error": err.replace(")", "")}
        except Exception as e:
            db.session.rollback()
            app.logger.error('Unknown error occurred while creating new rating')
            app.logger.debug(f'Error details: {str(e)}')
            return {"error": "failed to create rating"}

    @staticmethod
    def get_ratings(rating_id=None, return_as_object=False, page=None, offset=None, orderby=None, sortby=None):
        """
        Get ratings info
        :param rating_id: [str] book_ratings table primary key
        :param return_as_object: [bool] do we need to return the list of objects or dictionary for rows?
        :param page: [int] page number
        :param offset: [int] page offset - number of rows to return
        :return [list]
        """
        page = page or 1
        offset = offset or 20
        begin_query = db.session.query(BookRating)
        app.logger.info('Book rating retrieval request received')
        app.logger.debug(f'Request parameters - rating_id: {rating_id}, return_as_object: {return_as_object}, page: {page}, offset: {offset}, orderby: {orderby}, sortby: {sortby}')
        try:
            if not rating_id:
                # Paginated listing; orderby -1/1 selects desc/asc on sortby,
                # anything else falls back to created_at order.
                offset = int(offset)
                page = int(page)-1
                if orderby and sortby:
                    if orderby == -1:
                        result = begin_query.order_by(getattr(BookRating, sortby).desc()).offset(page*offset).limit(offset).all()
                    elif orderby == 1:
                        result = begin_query.order_by(getattr(BookRating, sortby).asc()).offset(page*offset).limit(offset).all()
                else:
                    result = begin_query.order_by(BookRating.created_at).offset(page*offset).limit(offset).all()
                count = BookRating.query.count()
                meta_data = {"rating_count": count, "page_number": int(page) + 1, "page_offset": offset}
                app.logger.info(f'Retrieved {count} ratings')
                if result:
                    if return_as_object:
                        return result
                    else:
                        return {"ratings": [row.to_dict() for row in result], **meta_data}
            else:
                # Single-rating lookup by primary key.
                result = begin_query.filter(
                    BookRating.rating_id == rating_id
                ).all()
                if result:
                    app.logger.info(f'Retrieved rating with rating_id {rating_id}')
                    return result[0] if return_as_object else result[0].to_dict()
        except Exception as e:
            # Any failure (bad sortby, unbound result, DB error) falls through
            # to a generic not-found payload.
            app.logger.error('Book rating retrieval failed')
            app.logger.debug(f'Error details: {e}, rating_id: {rating_id}, page: {page}, offset: {offset}')
            return {"error" : "No rating found"}

    @staticmethod
    def update_a_rating(rating_id, data):
        """
        Update an existing rating
        :param rating_id: [str] book_ratings table primary key
        :param data: [dict] rating updating field data
        :return [dict]
        """
        app.logger.info(f'Update rating request received for rating id: {rating_id}')
        app.logger.debug(f'Request data: {data}')
        rating = db.session.get(BookRating, rating_id)
        if not rating:
            app.logger.error(f'No rating found with id: {rating_id}')
            return {}
        try:
            # Apply only attributes the model actually has; refresh updated_at.
            for column in data:
                if hasattr(rating, column):
                    setattr(rating, column, data[column])
            rating.updated_at = datetime.utcnow()
            db.session.commit()
            app.logger.info('Rating successfully updated')
            return {'message': 'successfully updated rating_id={}'.format(rating_id)}
        except Exception as e:
            app.logger.error('Rating update failed')
            app.logger.debug(f'Error details: {str(e)}')
            return {"error": "failed to update rating"}

    @staticmethod
    def delete_rating_permanently(rating_id):
        """
        Delete a rating permanently
        :param rating_id: [str] book_ratings table primary key
        :return [dict]
        """
        app.logger.info(f'Request to delete rating with id {rating_id} received')
        rating = db.session.get(BookRating, rating_id)
        if rating:
            try:
                db.session.delete(rating)
                db.session.commit()
                app.logger.info('Rating successfully deleted')
                return {'message': 'successfully deleted rating_id={}'.format(rating_id)}
            except Exception as e:
                app.logger.error('Rating deletion failed')
                app.logger.debug(f'Error details: {e}')
                return {"error": "Rating deletion failed"}
        else:
            app.logger.warning(f'Rating with id {rating_id} not found')
return {} | Aaronh3k/book-status-api | src/models/book_ratings.py | book_ratings.py | py | 7,769 | python | en | code | 0 | github-code | 36 |
150466023 | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse, reverse_lazy
from django.views import generic
from django.views import View
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from .models import Play, Game
from play.forms import PlayCreateForm
from datetime import datetime
from datetime import date
# Create your views here.
class PlayCreate(View):
    """Create a new Play from a submitted PlayCreateForm; GET renders an
    empty form pre-filled with today's date."""
    def post(self, request):
        # Create a form instance and populate it with data from the request (binding):
        form = PlayCreateForm(request.POST)
        if form.is_valid():
            play = Play()
            # process the data in form.cleaned_data as required (here we just write it to the model location field)
            play.game = form.cleaned_data['game']
            play.location = form.cleaned_data['location']
            play.play_date = form.cleaned_data['play_date']
            play.play_complete = form.cleaned_data['play_complete']
            play.save()
            return HttpResponseRedirect(reverse('start-play') )
        else:
            # Invalid submission: rebuild a fresh form with placeholder
            # defaults and re-render the creation template.
            proposed_location = "xxx"
            proposed_date = date.today()
            form = PlayCreateForm(initial={'location': proposed_location, 'play_date': proposed_date})
            context = {'form':form,}
            return render(request, 'play_form.html', context)
    def get(self, request):
        # Blank creation form, defaulting the play date to today.
        proposed_location = ""
        proposed_date = date.today()
        form = PlayCreateForm(initial={'location': proposed_location, 'play_date': proposed_date})
        context = {'form':form,}
        return render(request, 'play_form_new.html', context)
class PlayUpdate(View):
    """Edit an existing Play identified by pk via PlayCreateForm."""
    # NOTE(review): this instantiates Play() at class-definition time; a
    # class-level `model = Play` (no call) is the usual Django idiom -- the
    # attribute appears unused either way.
    model = Play()
    def post(self, request, pk):
        play = get_object_or_404(Play, pk=pk)
        # Create a form instance and populate it with data from the request (binding):
        form = PlayCreateForm(request.POST)
        if form.is_valid():
            # process the data in form.cleaned_data as required (here we just write it to the model location field)
            play.game = form.cleaned_data['game']
            play.location = form.cleaned_data['location']
            play.play_date = form.cleaned_data['play_date']
            play.play_complete = form.cleaned_data['play_complete']
            play.save()
            return HttpResponseRedirect(reverse('play-list') )
        # Invalid submission: re-render with the bound form and game label.
        context = {'form':form, 'game_desc': play.game.description}
        return render(request, 'play_form.html', context)
    def get(self, request, pk):
        # Pre-populate the edit form with the play's current values.
        play = get_object_or_404(Play, pk=pk)
        form = PlayCreateForm(initial={'location': play.location, 'play_date': play.play_date, 'play_complete': play.play_complete})
        context = {'form':form,'game_desc': play.game.description}
        return render(request, 'play_form_update.html', context)
class PlayListView(generic.ListView):
    """List every play that is still in progress (play_complete False)."""
    model = Play
    queryset = Play.objects.filter(play_complete = False)
class PlayArchiveListView(generic.ListView):
    """List completed plays under the 'play_archive_list' context name."""
    model = Play
    context_object_name = 'play_archive_list'
    queryset = Play.objects.filter(play_complete = True)
    template_name = 'play/play_archive_list.html'
class PlayDetailView(generic.DetailView):
    """Default detail view for a single Play."""
    model = Play
class PlayDelete(generic.DeleteView):
    """Confirm-and-delete view for a Play."""
    model = Play
success_url = reverse_lazy('play-list') | edbranson/scorekeeping | scorekeeping/play/views.py | views.py | py | 3,501 | python | en | code | 0 | github-code | 36 |
17613529074 | import pandas as pd, numpy as np
import pytzer as pz
from pytzer.libraries import Moller88
# Import data and prepare for tests
pz = Moller88.set_func_J(pz)
data = pd.read_csv("tests/data/M88 Table 4.csv").set_index("point")
m_cols = ["Na", "Ca", "Cl", "SO4"]
params = Moller88.get_parameters(solutes=m_cols, temperature=383.15)
def get_activity_water(data_row):
    """Compute the water activity for one table row, rounded to 4 decimals."""
    molalities = pz.odict(data_row[m_cols])
    activity = pz.activity_water(molalities, **params).item()
    return np.round(activity, decimals=4)
# Evaluate the modelled water activity for every data point.
data["a_H2O_pytzer"] = data.apply(get_activity_water, axis=1)
def test_M88_activity_water():
    """Can we reproduce the values from M88 Table 4?
    We presume that their Point 4 contains some typo, hence worse agreement.
    """
    for point, row in data.iterrows():
        # Point 4 gets a looser tolerance (suspected typo in the paper).
        tolerance = 0.01 if row.name == 4 else 0.0001
        assert np.isclose(row["a_H2O"], row["a_H2O_pytzer"], rtol=0, atol=tolerance)
# test_M88_activity_water()
| mvdh7/pytzer | tests/test_M88.py | test_M88.py | py | 984 | python | en | code | 15 | github-code | 36 |
23923499833 | import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, concatenate, Conv2D, UpSampling2D
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, LeakyReLU
from tensorflow.keras import backend as K
from keras.layers.core import Activation
from keras.utils.generic_utils import get_custom_objects
from blocks import residual_block, const_upscale_block_100, const_upscale_block_5
def generator(mode,
              arch,
              input_channels=6,
              latent_variables=1,
              noise_channels=8,
              filters_gen=64,
              img_shape=(100, 100),
              constant_fields=1, #2
              conv_size=(3, 3),
              padding=None,
              stride=1,
              relu_alpha=0.2,
              norm=None,
              dropout_rate=None):
    """Build the super-resolution generator network.

    Upsamples a low-resolution conditioning input to high resolution via
    bilinear-upsampling + residual-block stages, injecting noise at two
    scales and concatenating downscaled/full-resolution constant fields.
    NOTE(review): only mode == 'GAN' produces a Model; any other mode leaves
    generator_output/noise inputs unbound and implicitly returns None --
    confirm whether a deterministic mode was ever intended here.
    """
    forceconv = True if arch == "forceconv" else False
    # Network inputs
    # low resolution condition
    generator_input = Input(shape=(None, None, input_channels), name="lo_res_inputs")
    # generator_input = Input(shape=(None, input_channels), name="lo_res_inputs")
    print(f"generator_input shape: {generator_input.shape}")
    # constant fields
    const_input = Input(shape=(None, None, constant_fields), name="hi_res_inputs")
    # const_input = Input(shape=(None, None, constant_fields), name="test")
    print(f"constants_input shape: {const_input.shape}")
    # Convolve constant fields down to match other input dimensions
    upscaled_const_input = const_upscale_block_100(const_input, filters=filters_gen)
    # upscaled_const_input = const_upscale_block_5(const_input, filters=filters_gen)
    print(f"upscaled constants shape: {upscaled_const_input.shape}")
    # (1,1) to (5,5), concatenate then upscale to (10,10)
    block_channels = [2*filters_gen, filters_gen]
    print('initial input shape',generator_input.shape)
    generator_intermediate = Dense(25, activation='relu')(generator_input)
    generator_intermediate = UpSampling2D(size=(5, 5), interpolation='bilinear')(generator_intermediate)
    print('shape after dense layer',generator_intermediate.shape)
    print(f"Shape after upsampling step 1: {generator_intermediate.shape}")
    for i in range(1):
        generator_intermediate = residual_block(generator_intermediate, filters=block_channels[0], conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
    generator_intermediate = UpSampling2D(size=(2, 2), interpolation='bilinear')(generator_intermediate)
    print(f"Shape after upsampling step 2: {generator_intermediate.shape}")
    for i in range(1):
        generator_intermediate = residual_block(generator_intermediate, filters=block_channels[1], conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
    # feed in noise as 10 x 10 array
    if mode == 'GAN':
        # low-resolution noise input
        noise_input = Input(shape=(None, None, noise_channels), name="noise_input")
        print(f"noise_input shape 1: {noise_input.shape}")
        # Concatenate all inputs together
        generator_output = concatenate([generator_intermediate, upscaled_const_input, noise_input])
        print(f"Shape after first concatenate: {generator_output.shape}")
    # Pass through residual blocks
    n_blocks = 2 # this was 3 then 6 now 2
    for i in range(n_blocks):
        generator_output = residual_block(generator_output, filters=filters_gen, conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
    print('End of first residual block')
    print(f"Shape after first residual block: {generator_output.shape}")
    # Upsampling from (10,10) toward (100,100) with alternating residual blocks
    block_channels = [2*filters_gen, filters_gen]
    generator_output = UpSampling2D(size=(5, 5), interpolation='bilinear')(generator_output)
    print(f"Shape after upsampling step 3: {generator_output.shape}")
    for i in range(1):
        generator_output = residual_block(generator_output, filters=block_channels[0], conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
    print(f"Shape after residual block: {generator_output.shape}")
    # concatenate hr noise as a 50 x 50 array
    noise_input_hr = Input(shape=(None, None, noise_channels), name = "hr_noise_input_hr")
    print('hr noise input shape: ',noise_input_hr.shape)
    generator_output = concatenate([generator_output, noise_input_hr])
    # Pass through residual blocks
    for i in range(2):
        generator_output = residual_block(generator_output, filters=filters_gen, conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
    print(f"Shape after third residual block: {generator_output.shape}")
    generator_output = UpSampling2D(size=(2, 2), interpolation='bilinear')(generator_output)
    print(f"Shape after upsampling step 4: {generator_output.shape}")
    for i in range(2):
        generator_output = residual_block(generator_output, filters=block_channels[1], conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
    print(f"Shape after residual block: {generator_output.shape}")
    # now upsampling to 200 x 200
    generator_output = UpSampling2D(size=(2, 2), interpolation='bilinear')(generator_output)
    print(f"Shape after upsampling step 4: {generator_output.shape}")
    for i in range(2):
        generator_output = residual_block(generator_output, filters=block_channels[1], conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
    print(f"Shape after residual block: {generator_output.shape}")
    # and downsampling back to 100 x 100 (strided conv halves each spatial dim)
    generator_output = Conv2D(filters=block_channels[1], kernel_size=(2, 2), strides=2, padding="valid", activation="relu")(generator_output)
    for i in range(2):
        generator_output = residual_block(generator_output, filters=block_channels[1], conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
    # Concatenate with the full-resolution constants field.
    generator_output = concatenate([generator_output, const_input])
    print(f"Shape after second concatenate: {generator_output.shape}")
    # Pass through residual blocks
    for i in range(6):
        generator_output = residual_block(generator_output, filters=filters_gen, conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
    print(f"Shape after third residual block: {generator_output.shape}")
    # define new activation function: softplus-like, bounded above
    def custom_activation(x):
        return K.log(K.exp(x)+1)-K.log(K.exp((x-1)/1.1)+1)
    get_custom_objects().update({'custom_activation': Activation(custom_activation)})
    # Output layer
    # generator_output = Conv2D(filters=1, kernel_size=(1, 1), activation='softplus', name="output")(generator_output)
    generator_output = Conv2D(filters=1, kernel_size=(1, 1), activation='custom_activation', name="output")(generator_output)
    print(f"Output shape: {generator_output.shape}")
    if mode == 'GAN':
        model = Model(inputs=[generator_input, const_input, noise_input, noise_input_hr], outputs=generator_output, name='gen')
        # model = Model(inputs=[generator_input, noise_input], outputs=generator_output, name='gen')
        return model
def discriminator(arch,
                  input_channels=6,
                  constant_fields=1, #2
                  filters_disc=64,
                  conv_size=(3, 3),
                  padding=None,
                  stride=1,
                  relu_alpha=0.2,
                  norm=None,
                  dropout_rate=None):
    """Build the discriminator (critic) as a Keras Model.

    Takes the low-resolution conditioning fields, the high-resolution constant
    fields and a candidate high-resolution image, and produces one unbounded
    scalar score per sample (no sigmoid on the final Dense layer — consistent
    with a Wasserstein-style critic; confirm against the training loss).

    :param arch: architecture name; "forceconv" forces 1-D convolutions
        inside the residual blocks.
    :param input_channels: channels of the low-res conditioning input.
    :param constant_fields: channels of the high-res constant-field input.
    :param filters_disc: base number of convolutional filters.
    :param conv_size: kernel size forwarded to the residual blocks.
    :param padding: padding mode forwarded to the residual blocks.
    :param stride: stride forwarded to the residual blocks.
    :param relu_alpha: LeakyReLU slope forwarded to the residual blocks.
    :param norm: normalisation mode forwarded to the residual blocks.
    :param dropout_rate: dropout rate forwarded to the residual blocks.
    :return: Keras Model named 'disc' mapping
        [lo_res_inputs, hi_res_inputs, output] -> disc_output.
    """
    forceconv = True if arch == "forceconv" else False
    # Network inputs
    # low resolution condition
    generator_input = Input(shape=(None, None, input_channels), name="lo_res_inputs")
    print(f"generator_input shape: {generator_input.shape}")
    # constant fields
    const_input = Input(shape=(None, None, constant_fields), name="hi_res_inputs")
    print(f"constants_input shape: {const_input.shape}")
    # target image (real or generated sample under judgement); the name
    # "output" matches the generator's output layer name.
    generator_output = Input(shape=(None, None, 1), name="output")
    print(f"generator_output shape: {generator_output.shape}")
    # convolve down constant fields to match ERA
    # (assumes a 100x area reduction given the helper name — TODO confirm)
    lo_res_const_input = const_upscale_block_100(const_input, filters=filters_disc)
    # lo_res_const_input = const_upscale_block_5(const_input, filters=filters_disc)
    print(f"upscaled constants shape: {lo_res_const_input.shape}")
    print(f"Shape of generator input before disc concatenation: {generator_input.shape}")
    print(tf.shape(generator_input))
    print(f"Shape of low res const input before disc concatenation: {lo_res_const_input.shape}")
    print(tf.shape(lo_res_const_input))
    # new step: upscale number values to (1,1) to (5,5) to (10,10) for concatenation!
    # (overall 5x then 2x bilinear upsampling of the low-res branch)
    block_channels = [filters_disc, 2*filters_disc]
    # block_channels = [1, 2]
    lo_res_input = Dense(25, activation='relu')(generator_input)
    lo_res_input = UpSampling2D(size=(5, 5), interpolation='bilinear')(lo_res_input)
    print(f"Shape after upsampling lo_res_input input for disc step 1: {lo_res_input.shape}")
    # add new concat step in here
    # lo_res_input = concatenate([lo_res_input, lo_res_const_input])
    # print(f"Shape after lo-res concatenate: {lo_res_input.shape}")
    for i in range(1):
        lo_res_input = residual_block(lo_res_input, filters=block_channels[0], conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
    lo_res_input = UpSampling2D(size=(2, 2), interpolation='bilinear')(lo_res_input)
    print(f"Shape after upsampling lo_res_input input for disc step 2: {lo_res_input.shape}")
    for i in range(1):
        lo_res_input = residual_block(lo_res_input, filters=block_channels[1], conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
    # concatenate constants to lo-res input
    # lo_res_input = concatenate([generator_input, lo_res_const_input])
    # not concatenating here anymore, yes we are
    # NOTE(review): lo_res_input is now at 10x the original resolution but is
    # concatenated with lo_res_const_input — the two must have matching
    # spatial dims here; confirm const_upscale_block_100's output size.
    lo_res_input = concatenate([lo_res_input, lo_res_const_input])
    # lo_res_input = concatenate([generator_input])
    # lo_res_input = generator_input
    # print(f"Shape after lo-res concatenate: {lo_res_input.shape}")
    # concatenate constants to hi-res input
    hi_res_input = concatenate([generator_output, const_input])
    # hi_res_input = generator_output
    print(f"Shape after hi-res concatenate: {hi_res_input.shape}")
    # encode inputs using residual blocks
    block_channels = [filters_disc, 2*filters_disc]
    # run through one set of RBs
    for i in range(1):
        lo_res_input = residual_block(lo_res_input, filters=block_channels[0], conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
    print(f"Shape of lo-res input after residual block: {lo_res_input.shape}")
    # Strided 5x5 conv spatially reduces the hi-res branch by 5x
    # (despite the "upsampling" wording in the prints below).
    hi_res_input = Conv2D(filters=block_channels[0], kernel_size=(5, 5), strides=5, padding="valid", activation="relu")(hi_res_input)
    print(f"Shape of hi_res_input after upsampling step 1: {hi_res_input.shape}")
    for i in range(1):
        hi_res_input = residual_block(hi_res_input, filters=block_channels[0], conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
    print(f"Shape of hi-res input after residual block: {hi_res_input.shape}")
    # run through second set of RBs
    for i in range(1):
        lo_res_input = residual_block(lo_res_input, filters=block_channels[1], conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
    print(f"Shape of lo-res input after residual block: {lo_res_input.shape}")
    # Second strided conv: further 2x spatial reduction of the hi-res branch,
    # bringing it to the lo-res branch's resolution for concatenation.
    hi_res_input = Conv2D(filters=block_channels[1], kernel_size=(2, 2), strides=2, padding="valid", activation="relu")(hi_res_input)
    print(f"Shape of hi_res_input after upsampling step 2: {hi_res_input.shape}")
    for i in range(1):
        hi_res_input = residual_block(hi_res_input, filters=block_channels[1], conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
    print(f"Shape after residual block: {hi_res_input.shape}")
    print('End of first set of residual blocks')
    # concatenate hi- and lo-res inputs channel-wise before passing through discriminator
    print('lo-res-shape: ',lo_res_input.shape)
    print('hi-res-shape: ',hi_res_input.shape)
    disc_input = concatenate([lo_res_input, hi_res_input])
    print(f"Shape after concatenating lo-res input and hi-res input: {disc_input.shape}")
    # encode in residual blocks
    for i in range(2):
        disc_input = residual_block(disc_input, filters=filters_disc, conv_size=conv_size, stride=stride, relu_alpha=relu_alpha, norm=norm, dropout_rate=dropout_rate, padding=padding, force_1d_conv=forceconv)
        print(f"Shape after residual block: {disc_input.shape}")
    print('End of second residual block')
    # discriminator output: pool spatial dims away, then a small MLP head.
    disc_output = GlobalAveragePooling2D()(disc_input)
    print(f"discriminator output shape after pooling: {disc_output.shape}")
    disc_output = Dense(64, activation='relu')(disc_output)
    print(f"discriminator output shape: {disc_output.shape}")
    disc_output = Dense(1, name="disc_output")(disc_output)
    print(f"discriminator output shape: {disc_output.shape}")
    disc = Model(inputs=[generator_input, const_input, generator_output], outputs=disc_output, name='disc')
    # disc = Model(inputs=[generator_input, generator_output], outputs=disc_output, name='disc')
    return disc
| vosps/tropical_cyclone | wgan_no_rain/models.py | models.py | py | 15,850 | python | en | code | 8 | github-code | 36 |
18877148508 | # coding=utf-8
import frontik.handler
class Page(frontik.handler.PageHandler):
    """Test page that POSTs two sub-requests back to itself and templates the
    combined JSON, optionally producing invalid JSON or a missing key."""

    def get_page(self):
        # Both sub-requests target this very page.
        self_uri = self.request.host + self.request.path
        invalid_json = self.get_argument('invalid', 'false')

        payload = {}
        payload['req1'] = self.post_url(self_uri, data={'param': 1})
        payload['req2'] = self.post_url(self_uri, data={'param': 2, 'invalid': invalid_json})

        # ?break=true drops req1 to exercise the template's missing-key path.
        if self.get_argument('break', 'false') == 'true':
            payload.pop('req1')

        self.set_template(self.get_argument('template', 'jinja.html'))
        self.json.put(payload)

    def post_page(self):
        if self.get_argument('invalid', 'false') == 'true':
            # Deliberately emit malformed JSON for error-handling tests.
            self.set_header('Content-Type', 'application/json')
            self.text = '{"result": FAIL}'
        else:
            self.json.put({
                'result': self.get_argument('param')
            })
| nekanek/frontik-without-testing | tests/projects/test_app/pages/json_page.py | json_page.py | py | 936 | python | en | code | 1 | github-code | 36 |
40761258837 | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """Add two numbers stored most-significant-digit-first in linked lists.

        Digits are collected into Python lists, consumed from the
        least-significant end, and the result list is built head-first so it
        also comes out most-significant-digit-first.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        digits1, digits2 = [], []
        node = l1
        while node:
            digits1.append(node.val)
            node = node.next
        node = l2
        while node:
            digits2.append(node.val)
            node = node.next

        carry = 0
        head = None
        # Consume both digit lists from their tails (least-significant first),
        # prepending each result digit so the final list is MSB-first.
        while digits1 or digits2:
            total = carry
            if digits1:
                total += digits1.pop()
            if digits2:
                total += digits2.pop()
            carry, digit = divmod(total, 10)
            new_node = ListNode(digit)
            new_node.next = head
            head = new_node
        # A leftover carry becomes the new most-significant digit.
        if carry:
            new_node = ListNode(carry)
            new_node.next = head
            head = new_node
        return head
28090834284 | import pytest
import config
import random
from datetime import datetime
from flask.testing import FlaskClient
from webapp import create_app, db
from flask import current_app
from webapp.models import Theme, Timebox, Task, Project
@pytest.fixture(scope='function')
def models():
    """Expose the model classes under test, keyed by short lowercase names."""
    model_registry = {'timebox': Timebox}
    return model_registry
@pytest.fixture(scope='function')
def logged_in_client(database, app):
    """Yield a Flask test client registered and logged in as user 'mark'.

    Depends on `database` so the tables exist before registration; pushes an
    application context for the duration of the test.
    """
    # Flask provides a way to test your application by exposing the Werkzeug
    # test Client and handling the context locals for you.
    testing_client = app.test_client()
    # Establish an application context before running the tests.
    ctx = app.app_context()
    ctx.push()
    # Register then log in; only the session cookie side effects matter, so
    # the responses are intentionally not kept (previously bound to unused
    # locals r / r2).
    testing_client.post('/register',
                        json=dict(username='mark', password='Password1'))
    testing_client.post('/login',
                        json=dict(username='mark', password='Password1'))
    yield testing_client  # this is where the testing happens!
    ctx.pop()
@pytest.fixture(scope='session')
def app():
    """Build one Flask application with the test configuration for the whole session."""
    return create_app(config.TestConfig)
@pytest.fixture(scope='function')
def database(app):
    """Create all tables before each test and drop them again afterwards."""
    with app.app_context():
        db.create_all()
        yield db
        # Close the session explicitly so no connection blocks the drop.
        db.session.close()
        db.drop_all()
@pytest.fixture(scope='function')
def test_client(database, app):
    """Yield an anonymous (not logged in) test client inside an app context."""
    client = app.test_client()
    ctx = app.app_context()
    ctx.push()
    yield client  # tests run here
    ctx.pop()
@pytest.fixture(scope='function')
def project(database):
    """Persist and return a single board-type Project."""
    board = Project(title='test project', project_type='board')
    db.session.add(board)
    db.session.commit()
    return board
@pytest.fixture(scope='function')
def sample_data(database, logged_in_client):
    """Populate two board projects with themes, timeboxes, eight tasks and one subtask
    via the HTTP API (so all server-side bookkeeping runs)."""
    for project_title in ('test project 1', 'test project 2'):
        logged_in_client.post('add_project', json={'title': project_title, 'project_type': 'board'})
    p1 = Project.query.filter_by(title='test project 1').first()
    p2 = Project.query.filter_by(title='test project 2').first()

    # Two themes per project, numbered <project><index>.
    logged_in_client.post('/add_theme', json={'project_id': p1.id, 'title': 'test theme 11'})
    logged_in_client.post('/add_theme', json={'project_id': p1.id, 'title': 'test theme 12'})
    logged_in_client.post('/add_theme', json={'project_id': p2.id, 'title': 'test theme 21'})
    logged_in_client.post('/add_theme', json={'project_id': p2.id, 'title': 'test theme 22'})

    # One timebox per project; goals deliberately differ in type.
    logged_in_client.post('add_timebox', json={'project_id': p1.id, 'title': 'To Do This Week', 'goal': []})
    logged_in_client.post('add_timebox', json={'project_id': p2.id, 'title': 'To Do This Week', 'goal': 'feel good'})

    # Tasks A-D on project 1, E-H on project 2.
    for letter in 'ABCD':
        logged_in_client.post('add_task', json={'project_id': p1.id, 'title': 'test task ' + letter})
    for letter in 'EFGH':
        logged_in_client.post('add_task', json={'project_id': p2.id, 'title': 'test task ' + letter})

    logged_in_client.post('add_subtask', json={'project_id': p1.id, 'task_id': 2, 'title': 'test subtask 1'})
@pytest.fixture(scope='function')
def random_data(database):
    """Fill the database with a random hierarchy of projects, themes,
    timeboxes and tasks; returns them in a dict keyed by kind.

    Quantities and titles are randomised, so tests using this fixture must
    not depend on exact counts or names.
    """
    statuses = ['To Do', 'In Progress', 'Done']
    verbs = ['Do', 'Make', 'Watch', 'Learn', 'Find', 'Investigate', 'Tidy', 'Book']
    nouns = ['Garden', 'TV', 'Kitchen', 'TV', 'Cinema', 'Homework', 'Laundry', 'Holiday']
    events = ['Tomorrow', 'Sunday', 'Christmas', 'Holidays', 'Birth', 'Wedding']
    projects = []
    themes = []
    timeboxes = []
    tasks = []
    # NOTE(review): unlike the `project` fixture, Project is built without a
    # `project_type` here — confirm the model supplies a default.
    for i in range(random.randint(1,4)):
        p = Project(title=random.choice(nouns) + ' List ' + str(random.randint(1,10)))
        projects.append(p)
    for p in projects:
        # 1-7 themes per project.
        for i in range(random.randint(1,7)):
            th = Theme(project=p, title=random.choice(verbs)+'ing things')
            themes.append(th)
        # Every project gets a 'Backlog' timebox plus two event-named ones.
        backlog = Timebox(project=p, title='Backlog', status='To Do')
        timeboxes.append(backlog)
        for i in range(1,3):
            # NOTE(review): 'Closed' is used here although the `statuses`
            # list above says 'Done' — confirm which values are valid.
            tb = Timebox(project=p, title='To do before ' + random.choice(events),
                         status=random.choice(['To Do', 'In Progress', 'Closed']))
            timeboxes.append(tb)
        # Nine tasks (priority 1..9) in every timebox, each on a random theme.
        for i in p.timeboxes.all():
            for j in range(1,10):
                t = Task(project=p,
                         theme=random.choice(p.themes.all()),
                         title=random.choice(verbs) + ' ' + random.choice(nouns),
                         status=random.choice(statuses),
                         priority=j
                         )
                t.add_to_timebox(i)
                tasks.append(t)
    db.session.add_all(projects)
    db.session.add_all(themes)
    db.session.add_all(timeboxes)
    db.session.add_all(tasks)
    db.session.commit()
    data = {
        'projects': projects,
        'themes': themes,
        'timeboxes': timeboxes,
        'tasks': tasks
    }
    return data
| thekitbag/todoodleoo-server | tests/conftest.py | conftest.py | py | 5,630 | python | en | code | 0 | github-code | 36 |
35416395710 | import mnml
from wiki import Wiki
from tmpl import Tmpl, quote, unquote
wiki = Wiki()
class T(Tmpl):
    """Inline template definitions for the wiki UI.

    Underscore attributes hold the template sources; presumably
    ``T.get("index")`` resolves ``_index`` (Tmpl is defined elsewhere —
    confirm its lookup convention).
    """

    # Shared page skeleton; children fill the ``t`` (title) and ``c``
    # (content) blocks.
    _base = """<html>
<head><title>Wiki</title></head>
<body><h1>${block t}${endblock}</h1>${block c}${endblock}</body>
</html>"""
    # Listing of all pages, each linked to its view URL.
    _index = """${extends base}
${block t}All Pages${endblock}
${block c}<ul>
${for page in pages}<li><a href="/view/${page|u}">${page|e}</a></li>${endfor}
</ul>${endblock}"""
    # Rendered page with an edit link.
    _view = """${extends base}
${block t}${name|e}${endblock}
${block c}${text}<p><a href="/edit/${name|u}">Edit</a></p>${endblock}"""
    # Edit form posting back to /edit/<name>.
    _edit = """${extends base}
${block t}Edit ${name|e}${endblock}
${block c}<form method="post" action="/edit/${name|u}">
<textarea name="text" cols="60" rows="15">${text|e}</textarea><br/>
<input type="submit" value="Save"/>
or <a href="/view/${name|u}">Cancel</a>
</form>${endblock}"""
def template(tmpl, **kwargs):
    """Render the named template with *kwargs* and wrap it in an HTTP response."""
    compiled = T.get(tmpl)
    body = T.render(compiled, kwargs)
    return mnml.HttpResponse(body)
class Index(mnml.RequestHandler):
    """Lists every page currently stored in the wiki."""

    def GET(self):
        all_pages = wiki.pages()
        return template("index", pages=all_pages)
class View(mnml.RequestHandler):
    """Shows a single wiki page rendered to HTML."""

    def GET(self, name):
        name = unquote(name)
        # Internal page links resolve to their /view/ URLs.
        make_link = lambda n: "/view/%s" % quote(n)
        html = Wiki.format(wiki.get_page(name), make_link)
        return template("view", name=name, text=html)
class Edit(mnml.RequestHandler):
    """Edit form for a wiki page: GET shows it, POST saves and redirects."""

    def GET(self, name):
        name = unquote(name)
        raw_text = wiki.get_page(name)
        return template("edit", name=name, text=raw_text)

    def POST(self, name):
        name = unquote(name)
        new_text = self.request.POST.getfirst('text')
        wiki.set_page(name, new_text)
        return mnml.HttpResponseRedirect("/view/%s" % quote(name))
# URL routing table: token-based patterns (":name" captures a path segment)
# mapped to handler classes.
application = mnml.TokenBasedApplication((
    ('/index', Index),
    ('/view/:name', View),
    ('/edit/:name', Edit),
))
if __name__ == '__main__':
    # Run the bundled development server when executed directly.
    mnml.development_server(application)
2911463134 | import torch.nn as nn
import torch.nn.functional as F
class NeuralNet(nn.Module):
    """Small CNN for 28x28 single-channel images producing 10 class logits.

    The flattened feature size 6*11*11 follows from 28x28 input:
    conv1 (3x3) -> 26, conv2 (4x4) -> 23, maxpool (3x3, stride 2) -> 11.
    """

    def __init__(self):
        super(NeuralNet, self).__init__()
        # Feature extractor: 1 -> 3 -> 6 channels.
        self.conv1 = nn.Conv2d(1, 3, kernel_size=(3, 3), stride=1, padding=0)
        self.conv2 = nn.Conv2d(3, 6, kernel_size=(4, 4), stride=1, padding=0)
        self.maxpool1 = nn.MaxPool2d(kernel_size=(3, 3), stride=2, padding=0)
        # Classifier head: 726 flattened features -> 360 -> 100 -> 10 logits.
        self.fullCon1 = nn.Linear(in_features=6 * 11 * 11, out_features=360)
        self.fullCon2 = nn.Linear(in_features=360, out_features=100)
        self.fullCon3 = nn.Linear(in_features=100, out_features=10)

    def forward(self, x):
        features = F.relu(self.conv1(x))
        features = F.relu(self.conv2(features))
        features = self.maxpool1(features)
        flat = features.view(-1, 6 * 11 * 11)
        hidden = F.relu(self.fullCon1(flat))
        hidden = F.relu(self.fullCon2(hidden))
        # Raw logits; no softmax (left to the loss function).
        return self.fullCon3(hidden)
| arunsanknar/AlectioExamples | image_classification/fashion-mnist-and-mnist/model.py | model.py | py | 870 | python | en | code | 0 | github-code | 36 |
2762523271 | from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
)
app = Flask(__name__)

# SECURITY NOTE(review): the LINE channel access token and channel secret are
# hard-coded below and committed to source control; they should be loaded
# from environment variables / a secrets store and these values rotated.
line_bot_api = LineBotApi('1l6c8hOlVNiLh23YRFrdl1TxJxK4KUZppI9dRaDscY5fX50D6xEBhb4D0ZglujEA1+MiFoFV2N5pl1KIYZmlq8/WSmxf2b4WVhcvfjJoUH7ISxjUDK55FzS1B3DhC6X4/m4ZM0/0bN7HRNzLzKToewdB04t89/1O/w1cDnyilFU=')
handler = WebhookHandler('3692fbc3db90c226b12e3f91130e2f9f')
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """Echo every incoming text message straight back to the sender."""
    echo = TextSendMessage(text=event.message.text)
    line_bot_api.reply_message(event.reply_token, echo)
| vacharich1/testme | bot_test.py | bot_test.py | py | 1,126 | python | en | code | 0 | github-code | 36 |
9045055933 | from typing import Optional
from xml.etree.ElementTree import Element, Comment
from api.mvc.model.data.content_model import ContentModel
from api.mvc.model.data.data_model import DataModel
from api.mvc.model.data.data_type import DataType
from api_core.exception.api_exception import ApiException
from api_core.helper.file_folder_helper import FileFolderHelper
from api_core.helper.string_helper import StringHelper
from api_core.mvc.service.file.xml_file_service import XmlFileService
class ContentModelFileService(XmlFileService):
"""
Class used to manage content-model XML files.
"""
    def __init__(self):
        """
        Initialize a new instance of 'ContentModelService' class.
        """
        # Non-lenient XML parsing, with the Alfresco dictionary namespace
        # registered as the default ("xmlns") namespace.
        super().__init__(True, {"xmlns": "http://www.alfresco.org/model/dictionary/1.0"})
def extract_content_model_prefix(self, content_model_file_path: str) -> str:
"""
Extracts the content model prefix.
:param content_model_file_path: The path to the content model file.
:return: The content model prefix.
"""
root: Element = self._get_root(content_model_file_path)
filename: str = FileFolderHelper.extract_filename_from_path(content_model_file_path)
# Verification that the attribute exists.
if not ("name" in root.attrib):
raise ApiException("Content model file '{0}' does not have the necessary 'namespace' node."
.format(filename))
# Verification that the attribute has a value.
if StringHelper.is_empty(root.attrib["name"]):
raise ApiException("The 'name' attribute of the content model file '{0}' is not entered. The latter is "
"mandatory.".format(filename))
# Data recovery.
try:
return root.attrib["name"].rsplit(":", 1)[0]
except IndexError:
raise ApiException("The value of the 'name' attribute of the source node is invalid. This must be composed "
"as follows: prefix:name")
def extract_content_model_name(self, content_model_file_path: str) -> str:
"""
Extracts the content model name.
:param content_model_file_path: The path to the content model file.
:return: The content model name.
"""
root: Element = self._get_root(content_model_file_path)
filename: str = FileFolderHelper.extract_filename_from_path(content_model_file_path)
# Verification that the attribute exists.
if not ("name" in root.attrib):
raise ApiException("Content model file '{0}' does not have the necessary 'namespace' node."
.format(filename))
# Verification that the attribute has a value.
if StringHelper.is_empty(root.attrib["name"]):
raise ApiException("The 'name' attribute of the content model file '{0}' is not entered. The latter is "
"mandatory.".format(filename))
# Data recovery.
try:
return root.attrib["name"].rsplit(":", 1)[1]
except IndexError:
raise ApiException("The value of the 'name' attribute of the source node is invalid. This must be composed "
"as follows: prefix:name")
    def create_content_model(self, content_model_file_path: str, prefix: str, name: str, description: Optional[str],
                             author: Optional[str]):
        """
        Creates a new content model XML file with the standard Alfresco
        skeleton: metadata, dictionary/content imports, a custom namespace,
        and empty 'types' and 'aspects' containers.
        :param content_model_file_path: The path of the file to write.
        :param prefix: The content model prefix.
        :param name: The content model name.
        :param description: The optional model description (a placeholder is
        written when None).
        :param author: The optional model author (a default is written when
        None).
        """
        model: Element = Element("model")
        model.set("name", "{0}:{1}".format(prefix, name))

        # Set xml namespace.
        if self.namespaces is not None:
            for item in self.namespaces:
                model.set(item[0], item[1])

        model.append(Comment(" Optional meta-data about the model "))

        # Set the description
        description_node: Element = Element("description")
        description_node.text = description if description is not None else "SET THE PROJECT DESCRIPTION"
        model.append(description_node)

        # Set the author
        author_node: Element = Element("author")
        author_node.text = author if author is not None else "Alfresco Helper Script 1.0.0"
        model.append(author_node)

        # Set the version
        version_node: Element = Element("version")
        version_node.text = "1.0.0"
        model.append(version_node)

        # Set the imports
        imports_node: Element = Element("imports")
        imports_node.append(Comment(" Import Alfresco Dictionary Definitions "))

        # First import: the dictionary model (prefix 'd').
        import1: Element = Element("import")
        import1.set("uri", "http://www.alfresco.org/model/dictionary/1.0")
        import1.set("prefix", "d")
        imports_node.append(import1)

        # Second import: the content model (prefix 'cm').
        import2: Element = Element("import")
        import2.set("uri", "http://www.alfresco.org/model/content/1.0")
        import2.set("prefix", "cm")
        imports_node.append(import2)

        # Set the namespaces.
        namespaces_node: Element = Element("namespaces")

        # Set a namespace
        namespace_node: Element = Element("namespace")
        namespace_node.set("uri", "http://www.{0}.org/model/content/1.0".format(name.lower()))
        namespace_node.set("prefix", prefix)
        namespaces_node.append(namespace_node)

        # Add the imports to the model.
        model.append(imports_node)

        # Add the custom namespace declaration to the model.
        model.append(Comment(" Custom namespace for the '{0}:{1}' model ".format(prefix, name)))
        model.append(namespaces_node)

        # Empty containers, to be filled later by add_type / add_aspect.
        types: Element = Element("types")
        aspects: Element = Element("aspects")
        model.append(types)
        model.append(aspects)

        # Write the XML file.
        self._write(model, content_model_file_path)
def find_data(self, content_model: ContentModel, typology: str, data: str) -> Optional[Element]:
"""
Finds data's node in its content model.
:param content_model: A data model of a content-model.
:param typology: The data typology.
:param data: The name of the data.
:return: The data node, otherwise None.
"""
return self._get_root(content_model.path).find(".//{0}{3}s/{0}{3}[@name='{1}:{2}']".format(
self.get_namespace("xmlns"), content_model.prefix, data, typology))
def find_aspect(self, content_model: ContentModel, aspect: str) -> Optional[Element]:
"""
Finds an aspect's node in its content model.
:param content_model: A data model of a content-model.
:param aspect: The name of the aspect.
:return: The aspect node, otherwise None.
"""
return self._get_root(content_model.path).find(".//{0}aspects/{0}aspect[@name='{1}:{2}']".format(
self.get_namespace("xmlns"), content_model.prefix, aspect))
def get_aspects_name(self, content_model: ContentModel) -> list[str]:
"""
Finds an all aspects name in its content model.
:param content_model: A data model of a content-model.
:return: The list of aspects name.
"""
aspects: list[str] = []
filename: str = FileFolderHelper.extract_filename_from_path(content_model.path)
for aspect in self._get_root(content_model.path).findall(".//{0}aspects/{0}aspect".format(
self.get_namespace("xmlns"))):
aspects.append(self.__extract_aspect_name(aspect, filename))
return aspects
def get_data_names(self, content_model: ContentModel, typology: str) -> list[str]:
"""
Finds an all aspects name in its content model.
:param typology: The type of the data to get.
:param content_model: A data model of a content-model.
:return: The list of aspects name.
"""
data_names: list[str] = []
filename: str = FileFolderHelper.extract_filename_from_path(content_model.path)
for data in self._get_root(content_model.path).findall(".//{0}{1}s/{0}{1}".format(
self.get_namespace("xmlns"), typology)):
data_names.append(self.__extract_data_name(data, typology, filename))
return data_names
def find_type(self, content_model: ContentModel, type_name: str) -> Optional[Element]:
return self._get_root(content_model.path).find(".//{0}types/{0}type[@name='{1}:{2}']".format(
self.get_namespace("xmlns"), content_model.prefix, type_name))
    def add_aspect(self, content_model: ContentModel, name: str, title: str, description: str):
        """
        Adds a new aspect node (with an empty 'properties' container) to the
        content model file and writes it back to disk.
        :param content_model: A data model of a content-model.
        :param name: The aspect name (without the model prefix).
        :param title: The optional human-readable title (skipped when empty).
        :param description: The optional description (skipped when empty).
        """
        root: Element = self._get_root(content_model.path)

        aspect: Element = Element("aspect")
        aspect.set("name", "{0}:{1}".format(content_model.prefix, name))

        if not StringHelper.is_empty(title):
            title_node: Element = Element("title")
            title_node.text = title
            aspect.append(title_node)

        if not StringHelper.is_empty(description):
            description_node: Element = Element("description")
            description_node.text = description
            aspect.append(description_node)

        properties: Element = Element("properties")
        aspect.append(properties)

        # Find (or lazily create) the <aspects> container.
        # NOTE(review): the extra .format() arguments below are unused by the
        # format string — harmless, but likely a leftover.
        add_to_root: bool = False
        aspects: Element = root.find(".//{0}aspects".format(self.get_namespace("xmlns"), content_model.prefix, aspect))
        if aspects is None:
            aspects = Element("aspects")
            add_to_root = True

        aspects.append(Comment(" Definition of aspect '{0}'. ".format(name)))
        aspects.append(aspect)

        # A freshly created container must be attached to the root.
        if add_to_root:
            root.append(aspects)

        self._write(root, content_model.path)
def add_type(self, content_model: ContentModel, name: str, title: str, description: str):
root: Element = self._get_root(content_model.path)
type_node: Element = Element("type")
type_node.set("name", "{0}:{1}".format(content_model.prefix, name))
if not StringHelper.is_empty(title):
title_node: Element = Element("title")
title_node.text = title
type_node.append(title_node)
if not StringHelper.is_empty(description):
description_node: Element = Element("description")
description_node.text = description
type_node.append(description_node)
properties: Element = Element("properties")
type_node.append(properties)
add_to_root: bool = False
types: Element = root.find(".//{0}types".format(self.get_namespace("xmlns"), content_model.prefix, type_node))
if types is None:
types = Element("types")
add_to_root = True
types.append(Comment(" Definition of type '{0}'. ".format(name)))
types.append(type_node)
self._write(root, content_model.path)
    def add_property(self, content_model: ContentModel, data: DataModel, name: str, title: Optional[str],
                     description: Optional[str], typology: str, mandatory: bool):
        """
        Adds a property node to an existing aspect/type of the content model
        and writes the file back to disk.
        :param content_model: A data model of a content-model.
        :param data: The aspect/type data model the property belongs to.
        :param name: The property name (without the model prefix).
        :param title: The optional title (skipped when empty).
        :param description: The optional description (skipped when empty).
        :param typology: The dictionary data type, written as "d:<typology>".
        :param mandatory: Whether the property is mandatory.
        """
        root: Element = self._get_root(content_model.path)

        # Create the property
        prop: Element = Element("property")
        prop.set("name", "{0}:{1}".format(content_model.prefix, name))

        # Set the property's title.
        if not StringHelper.is_empty(title):
            title_node: Element = Element("title")
            title_node.text = title
            prop.append(title_node)

        # Set the property's description.
        if not StringHelper.is_empty(description):
            description_node: Element = Element("description")
            description_node.text = description
            prop.append(description_node)

        # Set the property's type.
        type_node: Element = Element("type")
        type_node.text = "d:{0}".format(typology)
        prop.append(type_node)

        # Set the property's mandatory.
        mandatory_node: Element = Element("mandatory")
        mandatory_node.text = "true" if mandatory else "false"
        prop.append(mandatory_node)

        # Locate the owning aspect/type node.
        data_node: Optional[Element] = root.find(".//{0}{3}s/{0}{3}[@name='{1}:{2}']"
                                                 .format(self.get_namespace("xmlns"), content_model.prefix, data.name,
                                                         data.typology))

        # Find (or lazily create) its <properties> container.
        add_to_data: bool = False
        properties_node: Element = data_node.find(".//{0}properties".format(self.get_namespace("xmlns")))
        if properties_node is None:
            properties_node = Element("properties")
            add_to_data = True

        properties_node.append(prop)

        if add_to_data:
            data_node.append(properties_node)

        self._write(root, content_model.path)
    def add_extension(self, content_model: ContentModel, source: DataModel, parent: DataModel):
        """
        Sets (or overwrites) the 'parent' node of an aspect/type so that it
        extends another aspect/type, then writes the file back to disk.
        :param content_model: A data model of a content-model.
        :param source: The aspect/type that receives the parent.
        :param parent: The aspect/type to extend (its complete name is used).
        """
        namespace: str = self.get_namespace("xmlns")
        root: Element = self._get_root(content_model.path)

        source_node: Element = root.find(".//{0}{1}s/{0}{1}[@name='{2}:{3}']"
                                         .format(namespace, source.typology, content_model.prefix, source.name))

        parent_node: Optional[Element] = source_node.find("./{0}parent".format(namespace))
        add_parent: bool = True if parent_node is None else False
        if add_parent:
            parent_node = Element("parent")

        # Overwrites any existing parent value.
        parent_node.text = "{0}".format(parent.complete_name)

        if add_parent:
            # Insert before the properties container (index computed by the
            # helper) to keep the Alfresco schema's element order.
            source_node.insert(self.__get_properties_node_index(source_node), parent_node)

        self._write(root, content_model.path)
    def add_mandatory(self, content_model: ContentModel, source: DataModel, mandatory: DataModel):
        """
        Adds an aspect to the 'mandatory-aspects' list of an aspect/type,
        creating the list when it does not exist, then writes the file back.
        :param content_model: A data model of a content-model.
        :param source: The aspect/type that receives the mandatory aspect.
        :param mandatory: The aspect to declare as mandatory.
        """
        namespace: str = self.get_namespace("xmlns")
        root: Element = self._get_root(content_model.path)

        source_node: Element = root.find(".//{0}{1}s/{0}{1}[@name='{2}:{3}']"
                                         .format(namespace, source.typology, content_model.prefix, source.name))

        mandatory_node: Optional[Element] = source_node.find("./{0}mandatory-aspects".format(namespace))

        aspect: Element = Element("aspect")
        aspect.text = "{0}:{1}".format(content_model.prefix, mandatory.name)

        # Lazily create the container when it is missing.
        add_mandatory_node: bool = True if mandatory_node is None else False
        if add_mandatory_node:
            mandatory_node = Element("mandatory-aspects")

        mandatory_node.append(aspect)

        if add_mandatory_node:
            source_node.append(mandatory_node)

        self._write(root, content_model.path)
def get_aspect_description(self, content_model: ContentModel, name: str) -> Optional[str]:
"""
Retrieve the value of the description node of an aspect node.
:param content_model: A data model of a content-model.
:param name: The name of the aspect node.
:return: The value of the aspect's description node.
"""
return self.__get_data_description(content_model, DataType.ASPECT.name, name)
def get_type_description(self, content_model: ContentModel, name: str) -> Optional[str]:
"""
Retrieve the value of the description node of a type node.
:param content_model: A data model of a content-model.
:param name: The name of the type node.
:return: The value of the type's description node.
"""
return self.__get_data_description(content_model, DataType.TYPE.name, name)
def get_aspect_title(self, content_model: ContentModel, name: str) -> Optional[str]:
"""
Retrieve the value of the title node of an aspect node.
:param content_model: A data model of a content-model.
:param name: The name of the aspect node.
:return: The value of the aspect's title node.
"""
return self.__get_data_title(content_model, DataType.ASPECT.value, name)
def get_aspect_parent(self, content_model: ContentModel, name: str) -> Optional[str]:
"""
Retrieve the value of the parent node of an aspect node.
:param content_model: A data model of a content-model.
:param name: The name of the aspect node.
:return: The value of the aspect's parent node.
"""
return self.get_data_parent(content_model, DataType.ASPECT.value, name)
    def get_aspect_mandatory_aspects(self, content_model: ContentModel, name: str) -> list[str]:
        """
        Retrieve the list of mandatory aspects declared on an aspect node.
        :param content_model: A data model of a content-model.
        :param name: The name of the aspect node.
        :return: The names of the mandatory aspects.
        """
        return self.__get_data_mandatory_aspects(content_model, DataType.ASPECT.value, name)
def get_type_title(self, content_model: ContentModel, name: str) -> Optional[str]:
"""
Retrieve the value of the title node of a type node.
:param content_model: A data model of a content-model.
:param name: The name of the type node.
:return: The value of the type's title node.
"""
return self.__get_data_title(content_model, DataType.TYPE.name, name)
def get_type_parent(self, content_model: ContentModel, name: str) -> Optional[str]:
"""
Retrieve the value of the title node of a type node.
:param content_model: A data model of a content-model.
:param name: The name of the type node.
:return: The value of the type's title node.
"""
return self.__get_data_title(content_model, DataType.TYPE.name, name)
def __extract_aspect_name(self, aspect: Element, filename: str) -> str:
"""
Extracts the aspect node name.
:param aspect: The aspect node.
:return: The aspect name.
"""
return self.__extract_data_name(aspect, DataType.ASPECT.name, filename)
    def get_type_mandatory_aspects(self, content_model: ContentModel, name: str) -> list[str]:
        """
        Retrieve the list of mandatory aspects declared on a type node.
        :param content_model: A data model of a content-model.
        :param name: The name of the type node.
        :return: The names of the mandatory aspects.
        """
        return self.__get_data_mandatory_aspects(content_model, DataType.TYPE.value, name)
    def __extract_type_name(self, type_node: Element, filename: str) -> str:
        """
        Extracts the type node name.
        :param type_node: The type node.
        :param filename: The content model file name (used in error messages).
        :return: The type name.
        """
        return self.__extract_data_name(type_node, DataType.TYPE.value, filename)
@staticmethod
def __extract_data_name(data: Element, typology: str, filename: str) -> str:
"""
Extracts the aspect node name.
:param data: The aspect model.
:return: The aspect name.
"""
# Verification that the attribute exists.
if not ("name" in data.attrib):
raise ApiException("There is {1} in file '{0}' that has not been defined correctly. It lacks the "
"'name' attribute."
.format(filename, "an aspect" if typology.__eq__("aspect") else "a type"))
# Verification that the attribute has a value.
if StringHelper.is_empty(data.attrib["name"]):
raise ApiException("There is {1} in file '{0}' that has not been defined correctly. The 'name' "
"attribute is null or empty."
.format(filename, "an aspect" if typology.__eq__("aspect") else "a type"))
# Data recovery.
try:
return data.attrib["name"].rsplit(":", 1)[1]
except IndexError:
raise ApiException("There is {1} in file '{0}' whose name attribute was not set correctly. The "
"attribute value must be composed as follows: prefix:name"
.format(filename, "an aspect" if typology.__eq__("aspect") else "a type"))
@staticmethod
def __extract_property_name(prop: Element, filename: str) -> str:
"""
Extracts the aspect node name.
:param prop: The property node.
:return: The aspect name.
"""
# Verification that the attribute exists.
if not ("name" in prop.attrib):
raise ApiException("There is a property in file '{0}' that has not been defined correctly. It lacks the "
"'name' attribute."
.format(filename))
# Verification that the attribute has a value.
if StringHelper.is_empty(prop.attrib["name"]):
raise ApiException("There is a property in file '{0}' that has not been defined correctly. The 'name' "
"attribute is null or empty."
.format(filename))
# Data recovery.
try:
return prop.attrib["name"].rsplit(":", 1)[1]
except IndexError:
raise ApiException("There is a property in file '{0}' whose name attribute was not set correctly. The "
"attribute value must be composed as follows: prefix:name"
.format(filename))
def __get_data_description(self, content_model: ContentModel, typology: str, name: str) -> Optional[str]:
"""
Retrieve the value of the description node of a data node (aspect or type).
:param content_model: A data model of a content-model.
:param typology: The type of the node (aspect or type).
:param name: The name of the data node.
:return: The value of the data node description node.
"""
description: Element = self._get_root(content_model.path) \
.find(".//{0}{1}s/{0}{1}[@name='{2}:{3}']/{0}description"
.format(self.get_namespace("xmlns"), typology, content_model.prefix, name))
return None if description is None else description.text
def __get_data_title(self, content_model: ContentModel, typology: str, name: str) -> Optional[str]:
"""
Retrieve the value of the title node of a data node (aspect or type).
:param content_model: A data model of a content-model.
:param typology: The type of the node (aspect or type).
:param name: The name of the data node.
:return: The value of the data node title node.
"""
title: Element = self._get_root(content_model.path) \
.find(".//{0}{1}s/{0}{1}[@name='{2}:{3}']/{0}title"
.format(self.get_namespace("xmlns"), typology, content_model.prefix, name))
return None if title is None else title.text
def get_data_parent(self, content_model: ContentModel, typology: str, name: str) -> Optional[str]:
"""
Retrieve the value of the parent node of a data node (aspect or type).
:param content_model: A data model of a content-model.
:param typology: The type of the node (aspect or type).
:param name: The name of the data node.
:return: The value of the data node title node.
"""
parent: Element = self._get_root(content_model.path) \
.find(".//{0}{1}s/{0}{1}[@name='{2}:{3}']/{0}parent"
.format(self.get_namespace("xmlns"), typology, content_model.prefix, name))
return None if parent is None else parent.text
def __get_data_mandatory_aspects(self, content_model: ContentModel, typology: str, name: str) -> list[str]:
result: list[str] = []
root: Element = self._get_root(content_model.path)
mandatory_aspects: list[Element] = root.findall(".//{0}{1}s/{0}{1}[@name='{2}:{3}']/{0}mandatory-aspects"
"/{0}aspect".format(self.get_namespace("xmlns"), typology,
content_model.prefix, name))
for mandatory_aspect in mandatory_aspects:
result.append(mandatory_aspect.text)
return result
def __get_properties_node_index(self, data_node: Element) -> int:
namespace: str = self.get_namespace("xmlns")
children: list[Element] = data_node.findall(".//{0}*".format(namespace))
maximum: int = len(children)
index: int = 0
while index.__lt__(maximum) and children[index].tag.__ne__("{0}properties".format(namespace)):
index += 1
return index if index.__lt__(maximum) else (index - 1)
def get_property(self, content_model: ContentModel, data: DataModel, property_name: str) \
-> tuple[str, str, str, str, bool]:
namespace: str = self.get_namespace("xmlns")
root: Element = self._get_root(content_model.path)
filename: str = FileFolderHelper.extract_filename_from_path(content_model.path)
node: Element = root.find(".//{0}{1}s/{0}{1}[@name='{2}:{3}']/{0}properties/{0}property[@name='{2}:{4}']"
.format(namespace, data.typology, content_model.prefix, data.name, property_name))
if node is None:
ApiException("There is no property named '{0}' in {1} '{2}' in content model '{3}' in file '{4}'."
.format(property_name, data.typology, data.name, content_model.complete_name, filename))
title_node: Element = node.find("./{0}title".format(namespace))
title: Optional[str] = None if title_node is None else title_node.text
description_node: Element = node.find("./{0}description".format(namespace))
description: Optional[str] = None if description_node is None else description_node.text
type_node: Element = node.find("./{0}type".format(namespace))
typology: Optional[str] = None
if type_node is not None:
if StringHelper.is_empty(type_node.text):
raise ApiException("The type of the {0} property from the content-model '{1}' of file '{2}' is invalid."
" It cannot be empty or None."
.format(data.typology, content_model.complete_name,
FileFolderHelper.extract_filename_from_path(content_model.path)))
elif StringHelper.has_space(type_node.text):
raise ApiException("The type of the {0} property from the content-model '{1}' of file '{2}' is invalid."
" There can be no space in it."
.format(data.typology, content_model.complete_name,
FileFolderHelper.extract_filename_from_path(content_model.path)))
try:
typology = type_node.text.rsplit(":", 1)[1]
if (typology.__ne__("text") and typology.__ne__("int") and typology.__ne__("long")
and typology.__ne__("float") and typology.__ne__("double") and typology.__ne__("date")
and typology.__ne__("datetime") and typology.__ne__("boolean") and typology.__ne__("encrypted")
and typology.__ne__("noderef")):
raise ApiException(
"The type of the {0} property from the content-model '{1}' of file '{2}' is invalid. Its value"
" must be: text, int, long, float, double, date, datetime, boolean, encrypted or noderef."
.format(data.typology, content_model.complete_name,
FileFolderHelper.extract_filename_from_path(content_model.path)))
except IndexError:
raise ApiException("The value of property type '{0}' of {1} '{2}' of content model '{3}' of file '{4}'"
" is invalid. It should be formed like this: d:[type]"
.format(property_name, data.typology, data.name, content_model.complete_name,
filename))
mandatory_node: Element = node.find("./{0}mandatory".format(namespace))
mandatory: bool = False
if mandatory_node is not None:
if StringHelper.is_empty(mandatory_node.text):
raise ApiException("The value of property 'mandatory' '{0}' of {1} {2} of content model {3} of file "
"{4} is invalid. A value must be set ('true' or 'false').")
elif mandatory_node.text.__eq__("true"):
mandatory = True
elif mandatory_node.text.__eq__("false"):
mandatory = False
else:
raise ApiException("The value of property 'mandatory' '{0}' of {1} {2} of content model {3} of file "
"{4} is invalid. The value can only be 'true' or 'false'."
.format(property_name, data.typology, data.name, content_model.complete_name,
filename))
return property_name, title, description, typology, mandatory
def get_properties(self, content_model: ContentModel) -> list[str]:
result: list[str] = []
filename: str = FileFolderHelper.extract_filename_from_path(content_model.path)
root: Element = self._get_root(content_model.path)
for prop in root.findall(".//{0}aspects/{0}aspect/{0}properties/{0}property".format(
self.get_namespace("xmlns"))):
result.append(self.__extract_property_name(prop, filename))
for prop in root.findall(".//{0}types/{0}type/{0}properties/{0}property".format(
self.get_namespace("xmlns"))):
result.append(self.__extract_property_name(prop, filename))
return result
def get_data_property_names(self, content_model: ContentModel, data: DataModel) -> list[str]:
"""
Retrieve a list of property names from a data model.
:param content_model: A data model of a content-model.
:param data:
:return:
"""
# Result initialization.
result: list[str] = []
# Retrieving model properties.
namespace: str = self.get_namespace("xmlns")
root: Element = self._get_root(content_model.path)
filename: str = FileFolderHelper.extract_filename_from_path(content_model.path)
properties: list[Element] = root.findall(".//{0}{1}s/{0}{1}[@name='{2}:{3}']/{0}properties/{0}property"
.format(namespace, data.typology, content_model.prefix, data.name))
# Extract property names.
for prop in properties:
result.append(self.__extract_property_name(prop, filename))
# Return of the result.
return result
| seedbaobab/alfresco_helper | api/mvc/model/service/file/content_model_service.py | content_model_service.py | py | 30,329 | python | en | code | 0 | github-code | 36 |
5929748102 | from flask_restx import Namespace, Resource, reqparse
from main.model.ORM import *
search_ns = Namespace('searching', description='search recipes by either recipe\'s name or ingredients list')
search_name_rep = reqparse.RequestParser()
search_name_rep.add_argument('name', type=str)
search_name_rep.add_argument('email', type=str)
@search_ns.route('/by_name')
class Searching_by_Name(Resource):
    """Exact-match lookup of a single recipe by its name."""

    @search_ns.expect(search_name_rep)
    def post(self):
        """
        Return the full details of the recipe whose name matches exactly,
        and record it in the requesting user's explore history (capped at 10).
        """
        args = search_name_rep.parse_args()
        name = args['name']
        recipe = RecipeDB.query.filter_by(name=name).first()
        if not recipe:
            return {'error': 'recipe does not exist'}
        else:
            # update user explore history
            # NOTE(review): `eval` deserializes a DB-stored list string; this
            # is only safe if the column is never user-controlled — confirm.
            user = UserDB.query.filter_by(email=args['email']).first()
            explore_list = eval(user.explore_list)
            if recipe.id not in explore_list:
                # Keep at most 10 entries: drop the oldest before appending.
                if len(explore_list) == 10:
                    del explore_list[0]
                explore_list.append(recipe.id)
                user.explore_list = str(explore_list)
                db.session.commit()
            ##
            contributor = UserDB.query.filter_by(id=recipe.contributed_by).first()
            return {
                'recipe_name': recipe.name,
                'rate': recipe.average_rate,
                'external_link': recipe.external_link,
                'contributor': {'email': contributor.email,
                                'name': contributor.name,
                                'is_followed': contributor.id in eval(user.follow_list),
                                },
                'category': recipe.category,
                'ingredients': [IngredientDB.query.filter_by(id=id).first().name for id in
                                recipe.ingredients_list.split(',')],
                'comments': [i.text for i in recipe.comments_id],
                'is_favourite': recipe.id in eval(user.favourite_list)
            }
# Parser for /by_list: a comma-separated string of ingredient names.
search_list_rep = reqparse.RequestParser()
search_list_rep.add_argument('ingredients_list', type=str)  # , location='args')
@search_ns.route('/by_list')
# Returned per recipe: name, rating, picture, number of ingredient kinds.
class Searching_by_list(Resource):
    """Search recipes by the list of ingredients the user has on hand."""

    # First return brief info for every matching recipe; the front end asks
    # the back end for full details once the user opens a specific recipe.
    @search_ns.expect(search_list_rep)
    def post(self):
        """
        Return brief info (sorted by rating, descending) for every recipe
        missing at most one of the supplied ingredients.
        """
        ingredients_str = search_list_rep.parse_args()['ingredients_list']
        have_id_list = [IngredientDB.query.filter_by(name=name).first().id for name in ingredients_str.split(',')]
        has_fully_covered = 0
        return_format = []
        for i in db.session.query(RecipeDB.name, RecipeDB.ingredients_list, RecipeDB.average_rate,
                                  RecipeDB.category).order_by(
            RecipeDB.average_rate.desc()).all():
            need_id_list = i.ingredients_list.split(',')
            miss_id_list = [int(j) for j in need_id_list if int(j) not in have_id_list]
            # Skip if more than one ingredient is missing, or if the recipe
            # needs only one ingredient and even that one is missing.
            if len(miss_id_list) > 1 or (miss_id_list and len(need_id_list) == 1):
                continue
            if not miss_id_list:
                has_fully_covered = 1
            return_format.append({'name': i.name,
                                  'rate': i.average_rate,
                                  'category': i.category,
                                  'n_ingredients': len(need_id_list),
                                  'missing': IngredientDB.query.filter_by(id=miss_id_list[0]).first().name if len(
                                      miss_id_list) else None})
        if not has_fully_covered:
            # Only record combinations that no stored recipe fully covers.
            self._update_SearchDB(have_id_list)
        return return_format

    def _update_SearchDB(self, id_list):
        # Record (or bump the counter of) this exact sorted ingredient combination.
        searched_id_str = ','.join('%s' % n for n in sorted(id_list))
        searched_list = IngredientSearchDB.query.filter_by(ingredients_list=searched_id_str).first()
        if searched_list:
            searched_list.times += 1
        else:
            new_searched_list = IngredientSearchDB(
                ingredients_list=searched_id_str,
            )
            db.session.add(new_searched_list)
        db.session.commit()
        return
| SHFeMIX/Comp3900 | backend/main/controller/search.py | search.py | py | 4,399 | python | en | code | 3 | github-code | 36 |
28517096997 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable, ln
from variable_functions import my_attribute_label
class county_id(Variable):
    """County identifier of the gridcell the household lives in."""

    def dependencies(self):
        # Needs the household's gridcell id plus each gridcell's county id.
        return [my_attribute_label("grid_id"), "urbansim.gridcell.county_id"]

    def compute(self, dataset_pool):
        # Join the gridcell dataset onto households and pull county_id over.
        gridcell_dataset = dataset_pool.get_dataset('gridcell')
        return self.get_dataset().get_join_data(gridcell_dataset, "county_id")
from opus_core.tests import opus_unittest
from urbansim.variable_test_toolbox import VariableTestToolbox
from numpy import array
from numpy import ma
class Tests(opus_unittest.OpusTestCase):
    variable_name = "psrc.household.county_id"

    def test_my_inputs(self):
        """Each household should pick up the county_id of its gridcell."""
        data = {
            "household": {
                "grid_id": array([1, 1, 2, 3, 4]),
            },
            "gridcell": {
                "grid_id": array([1, 2, 3, 4]),
                "county_id": array([33, 31, 21, 33]),
            },
        }
        values = VariableTestToolbox().compute_variable(self.variable_name, data,
                                                        dataset="household")
        should_be = array([33, 33, 31, 21, 33])
        self.assertEqual(ma.allclose(values, should_be, rtol=1e-7), True,
                         msg="Error in " + self.variable_name)
if __name__=='__main__':
opus_unittest.main() | psrc/urbansim | psrc/household/county_id.py | county_id.py | py | 1,610 | python | en | code | 4 | github-code | 36 |
18515443128 | from sets import Set
from collections import defaultdict
class MagicDictionary(object):
    """
    Dictionary supporting search(word): True when some stored word equals
    `word` after changing exactly one character (LeetCode 676).
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        # Counts of single-wildcard patterns ("w_rd") over all stored words.
        # Initialized here so search() before buildDict() no longer raises
        # AttributeError.
        self.data = defaultdict(int)
        # The stored words themselves, to detect when the query is stored.
        self.original = set()

    def buildDict(self, dict):
        """
        Build a dictionary through a list of words
        :type dict: List[str]
        :rtype: void
        """
        # Builtin `set` replaces the deprecated/removed `sets.Set`; `range`
        # replaces the Python-2-only `xrange`.
        self.data = defaultdict(int)
        self.original = set(dict)
        for w in dict:
            for i in range(len(w)):
                pattern = w[:i] + "_" + w[i + 1:]
                self.data[pattern] += 1

    def search(self, word):
        """
        Returns if there is any word in the trie that equals to the given word after modifying exactly one character
        :type word: str
        :rtype: bool
        """
        # If `word` itself is stored, it contributes one count to each of its
        # own patterns, so a *different* word must push the count to >= 2.
        threshold = 2 if word in self.original else 1
        for i in range(len(word)):
            pattern = word[:i] + "_" + word[i + 1:]
            if self.data.get(pattern, 0) >= threshold:
                return True
        return False
# Your MagicDictionary object will be instantiated and called as such:
# obj = MagicDictionary()
# obj.buildDict(dict)
# param_2 = obj.search(word)
#Implement Magic Dictionary | jimmy623/LeetCode | Solutions/Implement Magic Dictionary.py | Implement Magic Dictionary.py | py | 1,413 | python | en | code | 0 | github-code | 36 |
28519396947 | # PopGen 1.1 is A Synthetic Population Generator for Advanced
# Microsimulation Models of Travel Demand
# Copyright (C) 2009, Arizona State University
# See PopGen/License
# MySQL statements run in order against Census 2000 PUMS person records:
# recode raw PUMS variables (age, sex, race1, esr) into the categorical
# controls used for synthesis (agep, gender, race, employment), extract the
# person sample table, and attach the householder's age category to the
# household sample.
DEFAULT_PERSON_PUMS2000_QUERIES = [ "alter table person_pums add column agep bigint",
                                    "alter table person_pums add column gender bigint",
                                    "alter table person_pums add column race bigint",
                                    "alter table person_pums add column employment bigint",
                                    # Age -> 10 categories (0-4, 5-14, ... 75-84, 85+).
                                    "update person_pums set agep = 1 where age < 5",
                                    "update person_pums set agep = 2 where age >= 5 and age < 15",
                                    "update person_pums set agep = 3 where age >= 15 and age < 25",
                                    "update person_pums set agep = 4 where age >= 25 and age < 35",
                                    "update person_pums set agep = 5 where age >= 35 and age < 45",
                                    "update person_pums set agep = 6 where age >= 45 and age < 55",
                                    "update person_pums set agep = 7 where age >= 55 and age < 65",
                                    "update person_pums set agep = 8 where age >= 65 and age < 75",
                                    "update person_pums set agep = 9 where age >= 75 and age < 85",
                                    "update person_pums set agep = 10 where age >= 85",
                                    "update person_pums set gender = sex",
                                    # race1 -> 7 categories (3-5 collapsed into one group).
                                    "update person_pums set race = 1 where race1 = 1",
                                    "update person_pums set race = 2 where race1 = 2",
                                    "update person_pums set race = 3 where race1 >=3 and race1 <= 5",
                                    "update person_pums set race = 4 where race1 = 6",
                                    "update person_pums set race = 5 where race1 = 7",
                                    "update person_pums set race = 6 where race1 = 8",
                                    "update person_pums set race = 7 where race1 = 9",
                                    # Employment status recode from the PUMS esr variable.
                                    "update person_pums set employment = 1 where esr = 0",
                                    "update person_pums set employment = 2 where esr = 1 or esr = 2 or esr = 4 or esr = 5",
                                    "update person_pums set employment = 3 where esr = 3",
                                    "update person_pums set employment = 4 where esr = 6",
                                    "drop table person_sample",
                                    "create table person_sample select state, pumano, hhid, serialno, pnum, agep, gender, race, employment, relate from person_pums",
                                    "alter table person_sample add index(serialno, pnum)",
                                    # Rebuild hhld_sample with the householder's (relate = 1)
                                    # age category attached as hhldrage.
                                    "drop table hhld_sample_temp",
                                    "alter table hhld_sample drop column hhldrage",
                                    "alter table hhld_sample rename to hhld_sample_temp",
                                    "drop table hhld_sample",
                                    "create table hhld_sample select hhld_sample_temp.*, agep as hhldrage from hhld_sample_temp left join person_sample using(serialno) where relate = 1",
                                    "alter table hhld_sample add index(serialno)",
                                    "update hhld_sample set hhldrage = 1 where hhldrage <=7 ",
                                    "update hhld_sample set hhldrage = 2 where hhldrage >7"]
# MySQL statements run in order against ACS PUMS person records: rename ACS
# columns to their Census-2000 equivalents, recode raw variables into the
# categorical controls used for synthesis, map ACS serial numbers to hhids via
# the serialcorr table, and attach the householder's (relate = 0 in ACS) age
# category to the household sample.
DEFAULT_PERSON_PUMSACS_QUERIES = ["alter table person_pums change agep age bigint",
                                  "alter table person_pums change puma pumano bigint",
                                  "alter table person_pums change rac1p race1 bigint",
                                  "alter table person_pums change st state bigint",
                                  "alter table person_pums change sporder pnum bigint",
                                  "alter table person_pums change rel relate bigint",
                                  "alter table person_pums add column agep bigint",
                                  "alter table person_pums add column gender bigint",
                                  "alter table person_pums add column race bigint",
                                  "alter table person_pums add column employment bigint",
                                  # Age -> 10 categories (0-4, 5-14, ... 75-84, 85+).
                                  "update person_pums set agep = 1 where age < 5",
                                  "update person_pums set agep = 2 where age >= 5 and age < 15",
                                  "update person_pums set agep = 3 where age >= 15 and age < 25",
                                  "update person_pums set agep = 4 where age >= 25 and age < 35",
                                  "update person_pums set agep = 5 where age >= 35 and age < 45",
                                  "update person_pums set agep = 6 where age >= 45 and age < 55",
                                  "update person_pums set agep = 7 where age >= 55 and age < 65",
                                  "update person_pums set agep = 8 where age >= 65 and age < 75",
                                  "update person_pums set agep = 9 where age >= 75 and age < 85",
                                  "update person_pums set agep = 10 where age >= 85",
                                  "update person_pums set gender = sex",
                                  # race1 -> 7 categories (3-5 collapsed into one group).
                                  "update person_pums set race = 1 where race1 = 1",
                                  "update person_pums set race = 2 where race1 = 2",
                                  "update person_pums set race = 3 where race1 >=3 and race1 <= 5",
                                  "update person_pums set race = 4 where race1 = 6",
                                  "update person_pums set race = 5 where race1 = 7",
                                  "update person_pums set race = 6 where race1 = 8",
                                  "update person_pums set race = 7 where race1 = 9",
                                  # Employment status recode from the PUMS esr variable.
                                  "update person_pums set employment = 1 where esr = 0",
                                  "update person_pums set employment = 2 where esr = 1 or esr = 2 or esr = 4 or esr = 5",
                                  "update person_pums set employment = 3 where esr = 3",
                                  "update person_pums set employment = 4 where esr = 6",
                                  # Replace ACS serialno with the integer hhid from serialcorr.
                                  "alter table person_pums add index(serialno)",
                                  "create table person_pums1 select person_pums.*, hhid from person_pums left join serialcorr using(serialno)",
                                  "update person_pums1 set serialno = hhid",
                                  "drop table person_sample",
                                  "create table person_sample select state, pumano, hhid, serialno, pnum, agep, gender, race, employment, relate from person_pums1",
                                  "alter table person_sample add index(serialno, pnum)",
                                  # Rebuild hhld_sample with the householder's (relate = 0)
                                  # age category attached as hhldrage.
                                  "drop table hhld_sample_temp",
                                  "alter table hhld_sample drop column hhldrage",
                                  "alter table hhld_sample rename to hhld_sample_temp",
                                  "drop table hhld_sample",
                                  "create table hhld_sample select hhld_sample_temp.*, agep as hhldrage from hhld_sample_temp left join person_sample using(serialno) where relate = 0",
                                  "alter table hhld_sample add index(serialno)",
                                  "update hhld_sample set hhldrage = 1 where hhldrage <=7 ",
                                  "update hhld_sample set hhldrage = 2 where hhldrage >7",
                                  "drop table hhld_sample_temp",
                                  "drop table person_pums1"]
# MySQL statements run in order against Census 2000 PUMS housing records:
# recode raw PUMS variables (unittype, hht, hinc, persons, noc) into the
# categorical household controls used for synthesis, then split the records
# into a household sample and a group-quarters sample.
DEFAULT_HOUSING_PUMS2000_QUERIES = ["alter table housing_pums add index(serialno)",
                                    "alter table housing_pums add column hhtype bigint",
                                    "alter table housing_pums add column hhldtype bigint",
                                    "alter table housing_pums add column hhldinc bigint",
                                    "alter table housing_pums add column hhldtenure bigint",
                                    "alter table housing_pums add column hhldsize bigint",
                                    "alter table housing_pums add column childpresence bigint",
                                    "alter table housing_pums add column groupquarter bigint",
                                    "alter table housing_pums add column hhldfam bigint",
                                    # hhtype: 1 = household, 2 = group quarters.
                                    "update housing_pums set hhtype = 1 where unittype = 0",
                                    "update housing_pums set hhtype = 2 where unittype = 1 or unittype = 2",
                                    "update housing_pums set hhldtype = 1 where hht = 1",
                                    "update housing_pums set hhldtype = 2 where hht = 2",
                                    "update housing_pums set hhldtype = 3 where hht = 3",
                                    "update housing_pums set hhldtype = 4 where hht = 4 or hht = 5",
                                    "update housing_pums set hhldtype = 5 where hht = 6 or hht = 7",
                                    "update housing_pums set hhldtype = -99 where hht = 0",
                                    # Household income -> 8 categories.
                                    "update housing_pums set hhldinc = 1 where hinc <15000",
                                    "update housing_pums set hhldinc = 2 where hinc >= 15000 and hinc < 25000",
                                    "update housing_pums set hhldinc = 3 where hinc >= 25000 and hinc < 35000",
                                    "update housing_pums set hhldinc = 4 where hinc >= 35000 and hinc < 45000",
                                    "update housing_pums set hhldinc = 5 where hinc >= 45000 and hinc < 60000",
                                    "update housing_pums set hhldinc = 6 where hinc >= 60000 and hinc < 100000",
                                    "update housing_pums set hhldinc = 7 where hinc >= 100000 and hinc < 150000",
                                    "update housing_pums set hhldinc = 8 where hinc >= 150000",
                                    "update housing_pums set hhldinc = -99 where hht = 0",
                                    # Tenure recode kept disabled in the original source.
                                    #"update housing_pums set hhldtenure = 1 where tenure = 1 or tenure = 2",
                                    #"update housing_pums set hhldtenure = 2 where tenure = 3 or tenure = 4",
                                    #"update housing_pums set hhldtenure = -99 where tenure = 0",
                                    # Household size capped at 7+.
                                    "update housing_pums set hhldsize = persons where persons < 7",
                                    "update housing_pums set hhldsize = 7 where persons >= 7",
                                    "update housing_pums set hhldsize = -99 where hht = 0",
                                    "update housing_pums set childpresence = 1 where noc > 0",
                                    "update housing_pums set childpresence = 2 where noc = 0",
                                    "update housing_pums set childpresence = -99 where hht = 0",
                                    "update housing_pums set groupquarter = unittype where unittype >0",
                                    "update housing_pums set groupquarter = -99 where unittype =0",
                                    # hhldfam: 1 = family household, 2 = non-family household.
                                    "update housing_pums set hhldfam = 1 where hhldtype <=3",
                                    "update housing_pums set hhldfam = 2 where hhldtype > 3",
                                    "delete from housing_pums where persons = 0",
                                    "drop table hhld_sample",
                                    "drop table gq_sample",
                                    "create table hhld_sample select state, pumano, hhid, serialno, hhtype, hhldtype, hhldinc, hhldsize, childpresence, hhldfam from housing_pums where hhtype = 1",
                                    "create table gq_sample select state, pumano, hhid, serialno, hhtype, groupquarter from housing_pums where hhtype = 2",
                                    "alter table hhld_sample add index(serialno)",
                                    "alter table gq_sample add index(serialno)"]
# MySQL statements run in order against ACS PUMS housing records: rename ACS
# columns to their Census-2000 equivalents, recode raw variables into the
# categorical household controls used for synthesis, build the serialcorr
# table mapping ACS serial numbers to integer hhids, then split the records
# into a household sample and a group-quarters sample.
DEFAULT_HOUSING_PUMSACS_QUERIES = ["alter table housing_pums add index(serialno)",
                                   "alter table housing_pums change hincp hinc bigint",
                                   "alter table housing_pums change np persons bigint",
                                   "alter table housing_pums change hupaoc noc bigint",
                                   "alter table housing_pums change type unittype bigint",
                                   "alter table housing_pums change st state bigint",
                                   "alter table housing_pums change puma pumano bigint",
                                   "alter table housing_pums add column hhtype bigint",
                                   "alter table housing_pums add column hhldtype bigint",
                                   "alter table housing_pums add column hhldinc bigint",
                                   "alter table housing_pums add column hhldtenure bigint",
                                   "alter table housing_pums add column hhldsize bigint",
                                   "alter table housing_pums add column childpresence bigint",
                                   "alter table housing_pums add column groupquarter bigint",
                                   "alter table housing_pums add column hhldfam bigint",
                                   # hhtype: 1 = household, 2 = group quarters
                                   # (ACS unittype codes differ from Census 2000).
                                   "update housing_pums set hhtype = 1 where unittype = 1",
                                   "update housing_pums set hhtype = 2 where unittype = 2 or unittype = 3",
                                   "update housing_pums set hhldtype = 1 where hht = 1",
                                   "update housing_pums set hhldtype = 2 where hht = 2",
                                   "update housing_pums set hhldtype = 3 where hht = 3",
                                   "update housing_pums set hhldtype = 4 where hht = 4 or hht = 6",
                                   "update housing_pums set hhldtype = 5 where hht = 5 or hht = 7",
                                   "update housing_pums set hhldtype = -99 where hht = 0",
                                   # Household income -> 8 categories.
                                   "update housing_pums set hhldinc = 1 where hinc <15000",
                                   "update housing_pums set hhldinc = 2 where hinc >= 15000 and hinc < 25000",
                                   "update housing_pums set hhldinc = 3 where hinc >= 25000 and hinc < 35000",
                                   "update housing_pums set hhldinc = 4 where hinc >= 35000 and hinc < 45000",
                                   "update housing_pums set hhldinc = 5 where hinc >= 45000 and hinc < 60000",
                                   "update housing_pums set hhldinc = 6 where hinc >= 60000 and hinc < 100000",
                                   "update housing_pums set hhldinc = 7 where hinc >= 100000 and hinc < 150000",
                                   "update housing_pums set hhldinc = 8 where hinc >= 150000",
                                   "update housing_pums set hhldinc = -99 where hht = 0",
                                   # Tenure recode kept disabled in the original source.
                                   #"update housing_pums set hhldtenure = 1 where tenure = 1 or tenure = 2",
                                   #"update housing_pums set hhldtenure = 2 where tenure = 3 or tenure = 4",
                                   #"update housing_pums set hhldtenure = -99 where tenure = 0",
                                   # Household size capped at 7+.
                                   "update housing_pums set hhldsize = persons where persons < 7",
                                   "update housing_pums set hhldsize = 7 where persons >= 7",
                                   "update housing_pums set hhldsize = -99 where hht = 0",
                                   "update housing_pums set childpresence = 1 where noc =1 or noc = 2 or noc = 3",
                                   "update housing_pums set childpresence = 2 where noc = 4",
                                   "update housing_pums set childpresence = -99 where hht = 0",
                                   "update housing_pums set groupquarter = 1 where unittype >1",
                                   "update housing_pums set groupquarter = -99 where unittype =1",
                                   # hhldfam: 1 = family household, 2 = non-family household.
                                   "update housing_pums set hhldfam = 1 where hhldtype <=3",
                                   "update housing_pums set hhldfam = 2 where hhldtype > 3",
                                   "delete from housing_pums where persons = 0",
                                   # Build serialcorr: one auto-increment hhid per ACS serialno.
                                   "drop table serialcorr",
                                   "create table serialcorr select state, pumano, serialno from housing_pums group by serialno",
                                   "alter table serialcorr add column hhid bigint primary key auto_increment not null",
                                   "alter table serialcorr add index(serialno)",
                                   "drop table hhld_sample",
                                   "drop table gq_sample",
                                   "alter table housing_pums add index(serialno)",
                                   "create table housing_pums1 select housing_pums.*, hhid from housing_pums left join serialcorr using(serialno)",
                                   "update housing_pums1 set serialno = hhid",
                                   "create table hhld_sample select state, pumano, hhid, serialno, hhtype, hhldtype, hhldinc, hhldsize, childpresence, hhldfam from housing_pums1 where hhtype = 1",
                                   "create table gq_sample select state, pumano, hhid, serialno, hhtype, groupquarter from housing_pums1 where hhtype = 2",
                                   "alter table hhld_sample add index(serialno)",
                                   "alter table gq_sample add index(serialno)",
                                   "drop table housing_pums1"]
# MySQL statements run in order against a Census 2000 Summary File table
# (the "%s" placeholder is filled with the SF table name at run time).
# They aggregate raw SF cell counts (P006/P008/P009/P010/P012/P014/P043/P052)
# into the person, household and group-quarters marginal-control columns, then
# extract the hhld_marginals, gq_marginals and person_marginals tables.
DEFAULT_SF2000_QUERIES = ["alter table %s add column agep1 bigint",
                          "alter table %s add column agep2 bigint",
                          "alter table %s add column agep3 bigint",
                          "alter table %s add column agep4 bigint",
                          "alter table %s add column agep5 bigint",
                          "alter table %s add column agep6 bigint",
                          "alter table %s add column agep7 bigint",
                          "alter table %s add column agep8 bigint",
                          "alter table %s add column agep9 bigint",
                          "alter table %s add column agep10 bigint",
                          "alter table %s add column gender1 bigint",
                          "alter table %s add column gender2 bigint",
                          "alter table %s add column race1 bigint",
                          "alter table %s add column race2 bigint",
                          "alter table %s add column race3 bigint",
                          "alter table %s add column race4 bigint",
                          "alter table %s add column race5 bigint",
                          "alter table %s add column race6 bigint",
                          "alter table %s add column race7 bigint",
                          "alter table %s add column employment1 bigint",
                          "alter table %s add column employment2 bigint",
                          "alter table %s add column employment3 bigint",
                          "alter table %s add column employment4 bigint",
                          "alter table %s add column childpresence1 bigint",
                          "alter table %s add column childpresence2 bigint",
                          "alter table %s add column groupquarter1 bigint",
                          "alter table %s add column groupquarter2 bigint",
                          "alter table %s add column hhldinc1 bigint",
                          "alter table %s add column hhldinc2 bigint",
                          "alter table %s add column hhldinc3 bigint",
                          "alter table %s add column hhldinc4 bigint",
                          "alter table %s add column hhldinc5 bigint",
                          "alter table %s add column hhldinc6 bigint",
                          "alter table %s add column hhldinc7 bigint",
                          "alter table %s add column hhldinc8 bigint",
                          "alter table %s add column hhldsize1 bigint",
                          "alter table %s add column hhldsize2 bigint",
                          "alter table %s add column hhldsize3 bigint",
                          "alter table %s add column hhldsize4 bigint",
                          "alter table %s add column hhldsize5 bigint",
                          "alter table %s add column hhldsize6 bigint",
                          "alter table %s add column hhldsize7 bigint",
                          "alter table %s add column hhldtype1 bigint",
                          "alter table %s add column hhldtype2 bigint",
                          "alter table %s add column hhldtype3 bigint",
                          "alter table %s add column hhldtype4 bigint",
                          "alter table %s add column hhldtype5 bigint",
                          "alter table %s add column hhldrage1 bigint",
                          "alter table %s add column hhldrage2 bigint",
                          "alter table %s add column hhldfam1 bigint",
                          "alter table %s add column hhldfam2 bigint",
                          # Age categories: sums of the male + female P008 age cells.
                          "update %s set agep1 = (P008003+P008004+P008005+P008006+P008007) + (P008042+P008043+P008044+P008045+P008046)",
                          "update %s set agep2 = (P008008+P008009+P008010+P008011+P008012+P008013+P008014+P008015+P008016+P008017 ) + (P008047+P008048+P008049+P008050+P008051+P008052+P008053+P008054+P008055+P008056)",
                          "update %s set agep3 = (P008018+P008019+P008020+P008021+P008022+P008023+P008024+P008025 ) + (P008057+P008058+P008059+P008060+P008061+P008062+P008063+P008064)",
                          "update %s set agep4 = (P008026+P008027) + (P008065+P008066)",
                          "update %s set agep5 = (P008028+P008029) + (P008067+P008068)",
                          "update %s set agep6 = (P008030+P008031) + (P008069+P008070)",
                          "update %s set agep7 = (P008032+P008033+P008034) + (P008071+P008072+P008073)",
                          "update %s set agep8 = (P008035+P008036+P008037) + (P008074+P008075+P008076)",
                          "update %s set agep9 = (P008038+P008039) + (P008077+P008078)",
                          "update %s set agep10 = (P008040) + (P008079)",
                          "update %s set gender1 = P008002",
                          "update %s set gender2 = P008041",
                          # Race categories from the P006 table.
                          "update %s set race1 = P006002",
                          "update %s set race2 = P006003",
                          "update %s set race3 = P006004",
                          "update %s set race4 = P006005",
                          "update %s set race5 = P006006",
                          "update %s set race6 = P006007",
                          "update %s set race7 = P006008",
                          # Employment categories from the P043 table (plus under-age persons).
                          "update %s set employment1 = agep1+agep2+P008018+P008057",
                          "update %s set employment2 = P043004+P043006+P043011+P043013",
                          "update %s set employment3 = P043007+P043014",
                          "update %s set employment4 = P043008+P043015",
                          "update %s set childpresence1 = P010008 + P010012 + P010015",
                          "update %s set childpresence2 = P010009 + P010013 + P010016 + P010017 + P010002",
                          "update %s set groupquarter1 = P009026",
                          "update %s set groupquarter2 = P009027",
                          # Household income categories from the P052 table.
                          "update %s set hhldinc1 = P052002 + P052003",
                          "update %s set hhldinc2 = P052004 + P052005",
                          "update %s set hhldinc3 = P052006 + P052007",
                          "update %s set hhldinc4 = P052008 + P052009",
                          "update %s set hhldinc5 = P052010 + P052011",
                          "update %s set hhldinc6 = P052012 + P052013",
                          "update %s set hhldinc7 = P052014 + P052015",
                          "update %s set hhldinc8 = P052016 + P052017",
                          # Household size categories from the P014 table.
                          "update %s set hhldsize1 = P014010 ",
                          "update %s set hhldsize2 = P014003+P014011 ",
                          "update %s set hhldsize3 = P014004+P014012 ",
                          "update %s set hhldsize4 = P014005+P014013 ",
                          "update %s set hhldsize5 = P014006+P014014 ",
                          "update %s set hhldsize6 = P014007+P014015 ",
                          "update %s set hhldsize7 = P014008+P014016 ",
                          "update %s set hhldtype1 = P010007",
                          "update %s set hhldtype2 = P010011 ",
                          "update %s set hhldtype3 = P010014",
                          "update %s set hhldtype4 = P010002",
                          "update %s set hhldtype5 = P010017",
                          "update %s set hhldrage1 = P012002",
                          "update %s set hhldrage2 = P012017",
                          "update %s set hhldfam1 = hhldtype1 + hhldtype2 + hhldtype3",
                          "update %s set hhldfam2 = hhldtype4 + hhldtype5",
                          # Extract the three marginal-control tables used by the synthesizer.
                          "drop table hhld_marginals",
                          "drop table gq_marginals",
                          "drop table person_marginals",
                          """create table hhld_marginals select state, county, tract, bg, hhldinc1, hhldinc2, hhldinc3, hhldinc4, hhldinc5, hhldinc6, hhldinc7, hhldinc8,"""
                          """hhldsize1, hhldsize2, hhldsize3, hhldsize4, hhldsize5, hhldsize6, hhldsize7, hhldtype1, hhldtype2, hhldtype3, hhldtype4, hhldtype5,"""
                          """childpresence1, childpresence2, hhldrage1, hhldrage2, hhldfam1, hhldfam2 from %s""",
                          "create table gq_marginals select state, county, tract, bg, groupquarter1, groupquarter2 from %s",
                          """create table person_marginals select state, county, tract, bg, agep1, agep2, agep3, agep4, agep5, agep6, agep7, agep8, agep9, agep10,"""
                          """gender1, gender2, race1, race2, race3, race4, race5, race6, race7, employment1, employment2, employment3, employment4 from"""
                          """ %s"""]
DEFAULT_SFACS_QUERIES = ["alter table %s add column agep1 bigint",
"alter table %s add column agep2 bigint",
"alter table %s add column agep3 bigint",
"alter table %s add column agep4 bigint",
"alter table %s add column agep5 bigint",
"alter table %s add column agep6 bigint",
"alter table %s add column agep7 bigint",
"alter table %s add column agep8 bigint",
"alter table %s add column agep9 bigint",
"alter table %s add column agep10 bigint",
"alter table %s add column gender1 bigint",
"alter table %s add column gender2 bigint",
"alter table %s add column race1 bigint",
"alter table %s add column race2 bigint",
"alter table %s add column race3 bigint",
"alter table %s add column race4 bigint",
"alter table %s add column race5 bigint",
"alter table %s add column race6 bigint",
"alter table %s add column race7 bigint",
"alter table %s add column race11 bigint",
"alter table %s add column race12 bigint",
"alter table %s add column race13 bigint",
"alter table %s add column race14 bigint",
"alter table %s add column race15 bigint",
"alter table %s add column race16 bigint",
"alter table %s add column race17 bigint",
"alter table %s add column race21 bigint",
"alter table %s add column race22 bigint",
"alter table %s add column race23 bigint",
"alter table %s add column race24 bigint",
"alter table %s add column race25 bigint",
"alter table %s add column race26 bigint",
"alter table %s add column race27 bigint",
"alter table %s add column employment1 bigint",
"alter table %s add column employment2 bigint",
"alter table %s add column employment3 bigint",
"alter table %s add column employment4 bigint",
"alter table %s add column childpresence1 bigint",
"alter table %s add column childpresence2 bigint",
"alter table %s add column groupquarter1 bigint",
"alter table %s add column hhldinc1 bigint",
"alter table %s add column hhldinc2 bigint",
"alter table %s add column hhldinc3 bigint",
"alter table %s add column hhldinc4 bigint",
"alter table %s add column hhldinc5 bigint",
"alter table %s add column hhldinc6 bigint",
"alter table %s add column hhldinc7 bigint",
"alter table %s add column hhldinc8 bigint",
"alter table %s add column hhldsize1 bigint",
"alter table %s add column hhldsize2 bigint",
"alter table %s add column hhldsize3 bigint",
"alter table %s add column hhldsize4 bigint",
"alter table %s add column hhldsize5 bigint",
"alter table %s add column hhldsize6 bigint",
"alter table %s add column hhldsize7 bigint",
"alter table %s add column hhldtype1 bigint",
"alter table %s add column hhldtype2 bigint",
"alter table %s add column hhldtype3 bigint",
"alter table %s add column hhldtype4 bigint",
"alter table %s add column hhldtype5 bigint",
"alter table %s add column hhldrage1 bigint",
"alter table %s add column hhldrage2 bigint",
"alter table %s add column hhldfam1 bigint",
"alter table %s add column hhldfam2 bigint",
"alter table %s add column check_gender bigint",
"alter table %s add column check_age bigint",
"alter table %s add column check_race bigint",
"alter table %s add column check_race1 bigint",
"alter table %s add column check_race2 bigint",
"alter table %s add column check_employment bigint",
"alter table %s add column check_type bigint",
"alter table %s add column check_size bigint",
"alter table %s add column check_fam bigint",
"alter table %s add column check_hhldrage bigint",
"alter table %s add column check_inc bigint",
"alter table %s add column check_child bigint",
"update %s set agep1 = (B01001000003)+(B01001000027)",
"update %s set agep2 = (B01001000004+B01001000005) + (B01001000028+B01001000029)",
"update %s set agep3 = (B01001000006+B01001000007+B01001000008+B01001000009+B01001000010) + (B01001000030+B01001000031+B01001000032+B01001000033+B01001000034)",
"update %s set agep4 = (B01001000011+B01001000012) + (B01001000035+B01001000036)",
"update %s set agep5 = (B01001000013+B01001000014) + (B01001000037+B01001000038)",
"update %s set agep6 = (B01001000015+B01001000016) + (B01001000039+B01001000040)",
"update %s set agep7 = (B01001000017+B01001000018+B01001000019) + (B01001000041+B01001000042+B01001000043)",
"update %s set agep8 = (B01001000020+B01001000021+B01001000022) + (B01001000044+B01001000045+B01001000046)",
"update %s set agep9 = (B01001000023+B01001000024) + (B01001000047+B01001000048)",
"update %s set agep10 = (B01001000025) + (B01001000049)",
"update %s set gender1 = B01001000002",
"update %s set gender2 = B01001000026",
"update %s set race1 = B02001000002",
"update %s set race2 = B02001000003",
"update %s set race3 = B02001000004",
"update %s set race4 = B02001000005",
"update %s set race5 = B02001000006",
"update %s set race6 = B02001000007",
"update %s set race7 = B02001000009+B02001000010",
"update %s set race11 = C01001A00001",
"update %s set race12 = C01001B00001",
"update %s set race13 = C01001C00001",
"update %s set race14 = C01001D00001",
"update %s set race15 = C01001E00001",
"update %s set race16 = C01001F00001",
"update %s set race17 = C01001G00001",
"update %s set race21 = B01001A00001",
"update %s set race22 = B01001B00001",
"update %s set race23 = B01001C00001",
"update %s set race24 = B01001D00001",
"update %s set race25 = B01001E00001",
"update %s set race26 = B01001F00001",
"update %s set race27 = B01001G00001",
"""update %s set employment2 = (B23001000005 + B23001000007) + (B23001000012 + B23001000014) + """
"""(B23001000019 + B23001000021) + (B23001000026 + B23001000028) + (B23001000033 + B23001000035) + """
"""(B23001000040 + B23001000042) + (B23001000047 + B23001000049) + (B23001000054 + B23001000056) + """
"""(B23001000061 + B23001000063) + (B23001000068 + B23001000070) + (B23001000075 + B23001000080 + B23001000085) + """
"""(B23001000091 + B23001000093) + (B23001000098 + B23001000100) + """
"""(B23001000105 + B23001000107) + (B23001000112 + B23001000114) + (B23001000119 + B23001000121) + """
"""(B23001000126 + B23001000128) + (B23001000133 + B23001000135) + (B23001000140 + B23001000142) + """
"""(B23001000147 + B23001000149) + (B23001000154 + B23001000156) + (B23001000161 + B23001000166 + B23001000171)""",
"""update %s set employment3 = (B23001000008 + B23001000015 + B23001000022 + """
"""B23001000029 + B23001000036 + B23001000043 + B23001000050 + B23001000057 + B23001000064 +"""
"""B23001000071 + B23001000076 + B23001000081 + B23001000086 + B23001000094 + B23001000101 +"""
"""B23001000108 + B23001000115 + B23001000122 + B23001000129 + B23001000136 + B23001000143 +"""
"""B23001000150 + B23001000157 + B23001000162 + B23001000167 + B23001000172) """,
"""update %s set employment4 = (B23001000009 + B23001000016 + B23001000023 + """
"""B23001000030 + B23001000037 + B23001000044 + B23001000051 + B23001000058 + B23001000065 +"""
"""B23001000072 + B23001000077 + B23001000082 + B23001000087 + B23001000095 + B23001000102 +"""
"""B23001000109 + B23001000116 + B23001000123 + B23001000130 + B23001000137 + B23001000144 +"""
"""B23001000151 + B23001000158 + B23001000163 + B23001000168 + B23001000173) """,
"update %s set employment1 = gender1 + gender2 - employment2 - employment3 - employment4",
"update %s set groupquarter1 = B26001000001",
"update %s set hhldinc1 = B19001000002 + B19001000003",
"update %s set hhldinc2 = B19001000004 + B19001000005",
"update %s set hhldinc3 = B19001000006 + B19001000007",
"update %s set hhldinc4 = B19001000008 + B19001000009",
"update %s set hhldinc5 = B19001000010 + B19001000011",
"update %s set hhldinc6 = B19001000012 + B19001000013",
"update %s set hhldinc7 = B19001000014 + B19001000015",
"update %s set hhldinc8 = B19001000016 + B19001000017",
"update %s set hhldsize1 = B25009000003+B25009000011",
"update %s set hhldsize2 = B25009000004+B25009000012",
"update %s set hhldsize3 = B25009000005+B25009000013",
"update %s set hhldsize4 = B25009000006+B25009000014",
"update %s set hhldsize5 = B25009000007+B25009000015",
"update %s set hhldsize6 = B25009000008+B25009000016",
"update %s set hhldsize7 = B25009000009+B25009000017",
"update %s set hhldtype1 = B11001000003",
"update %s set hhldtype2 = B11001000005",
"update %s set hhldtype3 = B11001000006",
"update %s set hhldtype4 = B11001000008",
"update %s set hhldtype5 = B11001000009",
"""update %s set hhldrage1 = (B25007000003+B25007000004+B25007000005+B25007000006+B25007000007+B25007000008)+"""
"""(B25007000013+B25007000014+B25007000015+B25007000016+B25007000017+B25007000018)""",
"update %s set hhldrage2 = (B25007000009+ B25007000010+B25007000011)+(B25007000019+ B25007000020+B25007000021)",
"update %s set hhldfam1 = hhldtype1 + hhldtype2 + hhldtype3",
"update %s set hhldfam2 = hhldtype4 + hhldtype5",
"update %s set childpresence1 = C23007000002",
"update %s set childpresence2 = C23007000017 + hhldtype4 + hhldtype5",
"update %s set check_gender = gender1 + gender2",
"update %s set check_age = agep1+agep2+agep3+agep4+agep5+agep6+agep7+agep8+agep9+agep10",
"update %s set check_race = race1+race2+race3+race4+race5+race6+race7",
"update %s set check_race1 = race11+race12+race13+race14+race15+race16+race17",
"update %s set check_race2 = race21+race22+race23+race24+race25+race26+race27",
"update %s set check_employment = employment1 + employment2 + employment3 + employment4",
"update %s set check_type = hhldtype1+hhldtype2+hhldtype3+hhldtype4+hhldtype5",
"update %s set check_size = hhldsize1+hhldsize2+hhldsize3+hhldsize4+hhldsize5+hhldsize6+hhldsize7",
"update %s set check_hhldrage = hhldrage1+hhldrage2",
"update %s set check_inc = hhldinc1+hhldinc2+hhldinc3+hhldinc4+hhldinc5+hhldinc6+hhldinc7+hhldinc8",
"update %s set check_fam = hhldfam1+hhldfam2",
"update %s set check_child = childpresence1+childpresence2",
"drop table hhld_marginals",
"drop table gq_marginals",
"drop table person_marginals",
"""create table hhld_marginals select state, county, tract, bg, hhldinc1, hhldinc2, hhldinc3, hhldinc4, hhldinc5, hhldinc6, hhldinc7, hhldinc8,"""
"""hhldsize1, hhldsize2, hhldsize3, hhldsize4, hhldsize5, hhldsize6, hhldsize7, hhldtype1, hhldtype2, hhldtype3, hhldtype4, hhldtype5,"""
"""childpresence1, childpresence2, hhldrage1, hhldrage2, hhldfam1, hhldfam2 from %s""",
"create table gq_marginals select state, county, tract, bg, groupquarter1 from %s",
"""create table person_marginals select state, county, tract, bg, agep1, agep2, agep3, agep4, agep5, agep6, agep7, agep8, agep9, agep10,"""
"""gender1, gender2, race1, race2, race3, race4, race5, race6, race7 from %s"""]
| psrc/urbansim | synthesizer/gui/default_census_cat_transforms.py | default_census_cat_transforms.py | py | 42,838 | python | en | code | 4 | github-code | 36 |
29403005622 | import rooms
import asyncio
from asgiref.sync import async_to_sync
import json as encoder
class WebsocketFireClass():
    """Pushes chat events to every websocket connection registered in rooms.chat."""

    @async_to_sync
    async def new_chat_message(self, state):
        """Broadcast *state* as a 'new_chat_message' event to all chat connections.

        Wrapped with async_to_sync so synchronous Django code can call it directly.
        """
        encoded_state = encoder.dumps({'type': 'new_chat_message', 'data': state})
        print("NEW CHAT MESSAGE, START FIRING...")
        if rooms.chat:
            # asyncio.wait() no longer accepts bare coroutines (deprecated in
            # 3.8, removed in 3.11); gather() sends to every connection and
            # propagates send errors instead of silently collecting them.
            await asyncio.gather(
                *(connection.send(encoded_state) for connection in rooms.chat)
            )

    @async_to_sync
    async def new_chat_message2(self, state):
        """Placeholder handler: sleeps 2s and sends nothing — presumably for testing; confirm."""
        await asyncio.sleep(2)
def run():
    """Demo of Python dicts: literal creation, key/value/item iteration, len()."""
    mi_diccionario = {
        "llave1": 1,
        "llave2": 2,
        "llave3": 3,
    }
    poblacion_paises = {
        "Argentina": 40234658,
        "Brasil": 70548621,
        "Chile": 526351485,
    }
    print("Imprimo las llaves")
    for pais in poblacion_paises.keys():
        print(pais)
    print("Imprimo los valores de las llaves")  # fixed output typo: "valosres"
    for pais in poblacion_paises.values():
        print(pais)
    print("Imprimimos las llaves y valores")
    # .items() yields (key, value) pairs, so both can be unpacked at once.
    for pais, poblacion in poblacion_paises.items():
        print(f"{pais} tiene {poblacion} habitantes")
    longitud = len(mi_diccionario)
    print(longitud)


if __name__ == "__main__":
    run()
17774181352 | from rest_framework.permissions import BasePermission
from noticeboard.utils.notices import (
user_allowed_banners,
has_super_upload_right,
)
class IsUploader(BasePermission):
    """
    A custom Django REST permission layer to check authorization
    over different actions on notices.
    """

    def has_permission(self, request, view, **kwargs):
        """
        Primary permission for notices.
        :param request: Django request object
        :param view:
        :param kwargs: keyword arguments
        :return: boolean expression of permission
        """
        # Reads and deletes are not restricted at this layer.
        if request.method in ('GET', 'DELETE'):
            return True

        person = request.person
        data = request.data
        try:
            banner_id = data['banner']
        except KeyError:
            return False

        # Uploading is allowed only on banners this person may post to.
        if banner_id not in user_allowed_banners(person):
            return False
        # Important notices additionally require super-upload rights.
        if data.get('is_important', False):
            return has_super_upload_right(person, banner_id)
        return True

    def has_object_permission(self, request, view, obj, **kwargs):
        """
        Object level permission for notices.
        :param request: Django request object
        :param view:
        :param obj: instance of the model
        :param kwargs: keyword arguments
        :return: boolean expression of permission
        """
        # Anyone may read a notice; only its uploader may modify it.
        if request.method == 'GET':
            return True
        return obj.uploader.id == request.person.id
| IMGIITRoorkee/omniport-app-noticeboard | permissions/uploader.py | uploader.py | py | 1,591 | python | en | code | 6 | github-code | 36 |
5049475129 | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import os
import subprocess
import sys
import pygit2
sys.path.insert(0, os.path.abspath('../..'))
project = 'tensorrt_llm'
copyright = '2023, NVidia'
author = 'NVidia'
branch_name = pygit2.Repository('.').head.shorthand
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
templates_path = ['_templates']
exclude_patterns = []
extensions = [
'sphinx.ext.duration',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'myst_parser', # for markdown support
"breathe",
'sphinx.ext.todo',
]
myst_url_schemes = {
"http":
None,
"https":
None,
"source":
"https://github.com/NVIDIA/TensorRT-LLM/tree/" + branch_name + "/{{path}}",
}
autosummary_generate = True
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
}
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
# ------------------------ C++ Doc related --------------------------
# Breathe configuration
breathe_default_project = "TensorRT-LLM"
breathe_projects = {"TensorRT-LLM": "../cpp_docs/xml"}
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CPP_INCLUDE_DIR = os.path.join(SCRIPT_DIR, '../../cpp/include/tensorrt_llm')
CPP_GEN_DIR = os.path.join(SCRIPT_DIR, '_cpp_gen')
print('CPP_INCLUDE_DIR', CPP_INCLUDE_DIR)
print('CPP_GEN_DIR', CPP_GEN_DIR)
def gen_cpp_doc(ofile_name: str, header_dir: str, summary: str):
    """Write a reST file pulling doxygen docs for every .h file in header_dir.

    The file starts with *summary*, then one section per header consisting of
    the header name, an underline, and a ``.. doxygenfile::`` breathe directive.
    """
    with open(ofile_name, 'w') as out:
        out.write(summary + "\n")
        for header in os.listdir(header_dir):
            if not header.endswith('.h'):
                continue
            section = (
                f"{header}\n"
                + "_" * len(header) + "\n\n"
                + f".. doxygenfile:: {header}\n"
                + "    :project: TensorRT-LLM\n\n"
            )
            out.write(section)
# Preamble/section header for the generated runtime.rst (see gen_cpp_doc).
runtime_summary = f"""
Runtime
==========
.. Here are files in the cpp/include/runtime
.. We manually add subsection to enable detailed description in the future
.. It is also doable to automatically generate this file and list all the modules in the conf.py
""".strip()

# os.makedirs is portable (subprocess 'mkdir -p' fails on Windows) and
# avoids shelling out for a simple directory creation.
os.makedirs(CPP_GEN_DIR, exist_ok=True)
gen_cpp_doc(CPP_GEN_DIR + '/runtime.rst', CPP_INCLUDE_DIR + '/runtime',
            runtime_summary)
| NVIDIA/TensorRT-LLM | docs/source/conf.py | conf.py | py | 2,943 | python | en | code | 3,328 | github-code | 36 |
def hanoi(n, src, via, dst):
    """Return the number of moves needed to transfer n disks from src to dst.

    Classic recurrence (count equals 2**n - 1): clear n-1 disks onto the
    spare peg, move the largest disk, then restack the n-1 on top.
    Returning the count replaces the original's mutable global counter.
    """
    if n == 1:
        # A single disk moves directly: src -> dst.
        return 1
    return (hanoi(n - 1, src, dst, via)    # clear the way onto the spare peg
            + 1                            # move the largest disk
            + hanoi(n - 1, via, src, dst)) # stack the rest on top of it


if __name__ == "__main__":
    # Guarding the script part keeps `hanoi` importable without blocking on stdin.
    N = int(input())
    print(hanoi(N, "A", "B", "C"))
18745919637 | from telegram import Bot, Update, ParseMode
from telegram.ext import run_async
import time
from bot.modules.helper_funcs.extraction import extract_user
from bot import dispatcher
from bot.modules.disable import DisableAbleCommandHandler
@run_async
def gdpr(bot: Bot, update: Update):
    """Joke /gdpr command: replies with a canned 'data deletion' skit in PMs only."""
    message = update.effective_message
    chat = update.effective_chat
    user = update.effective_user  # fetched but unused; kept for parity

    # Only respond in private chats; do nothing in groups.
    if chat.type != 'private':
        return

    replies = [
        "Deleting identifiable data...",
        "Almost done, Just be Patient",
        "My Ass! do not come here AGAIN. If you are gbanned this cmd will not revert it. So kindly GTFO.",
        "Pajeet confirm",
    ]
    for index, text in enumerate(replies):
        message.reply_text(text)
        if index < 2:
            # The skit pauses 2s after each of the first two replies.
            time.sleep(2)


GDPR_HANDLER = DisableAbleCommandHandler("gdpr", gdpr)
dispatcher.add_handler(GDPR_HANDLER)
| koppiesttiajaykumar/bot | bot/modules/gdpr.py | gdpr.py | py | 914 | python | en | code | 0 | github-code | 36 |
71075117545 | import collections
class Solution:
    def removeStones(self, stones: list[list[int]]) -> int:
        """Max stones removable when each removal needs another stone sharing
        its row or column (LeetCode 947).

        Stones sharing a row or column form connected components; every
        component of size k can be reduced to one stone, so the answer is
        len(stones) - number_of_components.

        Fixes: uses builtin generics (the original annotated ``List``
        without importing it — a NameError outside LeetCode) and an explicit
        stack instead of recursion, so large inputs cannot blow the
        recursion limit.
        """
        by_row = collections.defaultdict(list)
        by_col = collections.defaultdict(list)
        for x, y in stones:
            by_row[x].append((x, y))
            by_col[y].append((x, y))

        seen = set()
        components = 0
        for stone in map(tuple, stones):
            if stone in seen:
                continue
            components += 1
            seen.add(stone)
            stack = [stone]
            # Iterative flood fill over all stones reachable via shared
            # rows/columns.
            while stack:
                x, y = stack.pop()
                for neighbour in by_row[x] + by_col[y]:
                    if neighbour not in seen:
                        seen.add(neighbour)
                        stack.append(neighbour)
        return len(stones) - components
41224759283 | '''
Created on 15-Oct-2013
@author: Kashaj
'''
import re, sqlite3,os
db = sqlite3.connect('Train_Database.db')
db.text_factory = str
db.row_factory = sqlite3.Row
db.execute('drop table if exists TrainStationNode')
db.execute('create table TrainStationNode(Train_Num char[6],stn_code char[6],route int,arr_time text,dep_time text)')
def main():
    """Parse every train's schedule page and load its stop rows into SQLite.

    For each train number listed in List_Of_All_Train_Nums.txt: extract its
    saved HTML page (hread), scrape the stop tuples (hwork), insert them
    (hdatab); finally dump the table (getData) and commit.
    """
    # 'with' closes the listing file even if parsing raises
    # (the original leaked the handle).
    with open('List_Of_All_Train_Nums.txt', 'r') as tr_num:
        for num in tr_num:
            # Drop non-breaking spaces plus surrounding whitespace/newline
            # (replaces the original's redundant strip chain).
            num = num.replace("\xa0", "").strip()
            hread(num)
            hdatab(num, hwork())
    getData()
    db.commit()
def hread(filename):
    """Copy the saved schedule HTML for one train into the scratch file
    'loolws.txt' that hwork() parses.

    :param filename: train number; '.html' is appended and the file is read
        from the local schedule dump directory (hard-coded absolute path).
    """
    save_path = r'C:/Users/kashaj/Desktop/proj/data/schedule/'
    completeName = os.path.join(save_path, filename + '.html')
    # Context managers close both handles (the original leaked the input one).
    with open(completeName, 'r') as f:
        text = f.read().strip('\n')
    with open('loolws.txt', 'w') as f2:
        f2.write(text)
def hwork():
    """Scrape (station_code, route, arrival, departure) tuples from the
    scratch HTML in 'loolws.txt'.

    :return: list of 4-tuples in page order.
    """
    with open('loolws.txt', 'r') as f:
        text = f.read()
    # BUGFIX: the route group was (\d)+, which captures only the LAST digit
    # of multi-digit route numbers; (\d+) keeps the whole number.
    pattern = (r'<TR>\n<TD>\d+</TD>\n<TD>(\w+\s*)</TD>\n<TD>\w+.*</TD>\n'
               r'<TD>(\d+)</TD>\n<TD>(.+)</TD>\n<TD>(.+)</TD>\n'
               r'<TD>.*</TD>\n<TD>\d+</TD>\n<TD>\d+</TD>')
    return re.findall(pattern, text, re.IGNORECASE)
def hdatab(num, tuples):
    """Insert one train's stop tuples into TrainStationNode.

    :param num: train number (string).
    :param tuples: list of (station_code, route, arr_time, dep_time)
        as returned by hwork().
    """
    marker = '<FONT COLOR = red>'
    for stn_code, route, arr_time, dep_time in tuples:
        # The page wraps the dummy arrival of the first stop / departure of
        # the last stop in a red <FONT> tag; strip it wherever it appears.
        # (The original had three near-identical branches for this, two of
        # them byte-for-byte duplicates.)
        db.execute(
            'insert into TrainStationNode(Train_Num,stn_code,route,arr_time,dep_time) '
            'values (?,?,?,?,?)',
            (num, stn_code, route,
             arr_time.replace(marker, ''), dep_time.replace(marker, '')))
def getData():
    """Print every row of TrainStationNode (debug dump)."""
    columns = ('Train_Num', 'stn_code', 'route', 'arr_time', 'dep_time')
    query = 'Select Train_Num,stn_code,route,arr_time,dep_time from TrainStationNode'
    for row in db.execute(query):
        print(*(row[name] for name in columns))
if __name__ == '__main__':
main() | ShaikAsifullah/Indian-Railways-Informal | getEdges.py | getEdges.py | py | 2,447 | python | en | code | 1 | github-code | 36 |
75138929384 | from loja.models import Produto
from loja.models import Pedido
from loja.models import CATEGORIAS
from rest_framework import serializers
class ProdutoSerializer(serializers.ModelSerializer):
    """Flat representation of a Produto: id, name, description, price, category."""

    class Meta:
        model = Produto
        fields = ('id', 'nome', 'descricao', 'valor', 'categoria')
class PedidoSerializer(serializers.ModelSerializer):
    """Serializes a Pedido (order) and validates that the chosen products
    cover every category."""

    # Computed read-only field: total value of the order.
    valor_total = serializers.SerializerMethodField()

    class Meta:
        model = Pedido
        fields = (
            'id',
            'produtos',
            'cliente',
            'status',
            'data_realizacao',
            'valor_total',
        )

    def get_valor_total(self, obj):
        """Return the order's total value (delegates to the model)."""
        return obj.get_valor_total()

    def validate_produtos(self, attrs):
        """Require at least one product of EVERY category, all categories valid.

        The original only compared counts and category validity, so an order
        with two products of one category and none of another slipped
        through; we now verify every category is actually covered.
        """
        msg = 'É necessário escolher no mínimo um produto de cada categoria'
        categorias_validas = set(dict(CATEGORIAS))
        categorias_escolhidas = set()
        for produto in attrs:
            if produto.categoria not in categorias_validas:
                raise serializers.ValidationError(msg)
            categorias_escolhidas.add(produto.categoria)
        if categorias_escolhidas != categorias_validas:
            raise serializers.ValidationError(msg)
        return attrs

    def validate(self, attrs):
        return attrs
20780721557 | import os #to access files
from PIL import Image #to open JPEGs
import numpy as np
#-------------------------------Custom INCLUDES-------------------------------
import lookupTables as lT
#-------------------------------Function DEFINITIONS----------------------------
def fittingEstimator(inDir, file_name, EV_calc_local, EV_calc_global, LUT):
    """Estimate cumulative exposure-value (EV) drift across a JPEG sequence.

    Consecutive frames are sampled on an R-spaced pixel lattice, values are
    mapped through LUT, and for each frame pair the average difference of
    the usable samples (both values in [1, 7]) is accumulated. Fitting
    "x + b" reduces to this mean difference.

    Results are written IN PLACE into the caller's lists: EV_calc_local and
    EV_calc_global become the running EV offsets, starting at 0.0.

    :param inDir: directory (relative to CWD) holding the JPEGs.
    :param file_name: sequence of file names; only the stem is used and
        '.jpg' is appended. Assumes all frames share one size — TODO confirm.
    :param LUT: callable mapping an 8-bit grey level to an EV-like value.
    """
    EV_calc_local[:] = [0.0]
    EV_calc_global[:] = []
    R = 10  # lattice spacing in pixels

    def load_grey(stem):
        # Open -> greyscale -> ndarray; the context manager closes the file
        # handle (the original left that to the GC).
        path = "./" + inDir + "/" + stem.split(".")[0] + ".jpg"
        with Image.open(path) as img:
            return np.array(img.convert("L"))

    def sample(img, out):
        # Fill `out` with LUT-mapped values on the R-spaced lattice
        # (deduplicates the original's two identical nested loops).
        idx = -1
        for i in range(0, img.shape[0], R):
            for j in range(0, img.shape[1], R):
                idx += 1
                out[idx] = LUT(img[i, j])

    img1 = load_grey(file_name[0])
    N, M = img1.shape
    n_samples = len(range(0, N, R)) * len(range(0, M, R))
    x1 = np.array([0.0] * n_samples)
    x2 = np.array([0.0] * n_samples)
    sample(img1, x1)

    for name in file_name[1:]:
        img2 = load_grey(name)
        sample(img2, x2)

        # Keep only lattice points whose EV is usable in BOTH frames.
        xx1 = []
        xx2 = []
        for x, y in zip(x1, x2):
            if 1 <= x <= 7 and 1 <= y <= 7:
                xx1.append(x)
                xx2.append(y)
        if not xx1:
            # No usable overlap: np.average() below yields nan for this step.
            print("WARNING!!")
            print(x2)
        EV_calc_local.append(np.average(np.array(xx2) - np.array(xx1))
                             + EV_calc_local[-1])
        img1[:] = img2[:]
        x1[:] = x2[:]

    EV_calc_global[:] = EV_calc_local[:]
| nowaythatsok/GNU_lapser | version_01/estimators.py | estimators.py | py | 1,924 | python | en | code | 0 | github-code | 36 |
2671289236 | import os
import shutil
from fastapi import UploadFile
# UPLOAD_DIR = "model_upload_dir"
def upload_file(upload_dirname: str, file: "UploadFile", filename: str):
    """Stream an uploaded file to upload_dirname/filename.

    Creates intermediate directories as needed and returns a status dict.
    The UploadFile annotation is quoted so the function also imports
    without FastAPI installed.
    """
    if not (file and filename):
        return {"status": "ERROR", "msg": "uploaded file is not found."}
    target_path = os.path.join(upload_dirname, filename)
    os.makedirs(os.path.dirname(target_path), exist_ok=True)
    # 'with' guarantees the handle is closed even if the copy fails
    # (the original leaked it on error).
    with open(target_path, "wb+") as target:
        shutil.copyfileobj(file.file, target)
    # NOTE(review): "(unknown)" looks like a scrubbed filename placeholder
    # in the message — confirm against the original source.
    return {"status": "OK", "msg": f"uploaded files (unknown) "}
def concat_file_chunks(
    upload_dirname: str, filename: str, chunkNum: int, dest_dirname: str
):
    """Stitch chunkNum uploaded chunk files back into upload_dirname/filename.

    Chunks are expected as '<filename>_<i>' in upload_dirname; each is
    appended in order and deleted afterwards. (The original looked for
    literal '(unknown)_<i>' names — apparently a scrubbed expression that
    can never match real chunks; confirm the naming against the uploader.)
    NOTE(review): dest_dirname is accepted but unused — confirm intent.
    """
    target_path = os.path.join(upload_dirname, filename)
    os.makedirs(os.path.dirname(target_path), exist_ok=True)
    if os.path.exists(target_path):
        os.remove(target_path)
    with open(target_path, "ab") as out:
        for i in range(chunkNum):
            chunk_path = os.path.join(upload_dirname, f"{filename}_{i}")
            with open(chunk_path, "rb") as chunk:
                out.write(chunk.read())
            os.remove(chunk_path)
    # Removed the redundant out.close() that ran inside the 'with' block.
    return {"status": "OK", "msg": f"concat files {out} "}
| w-okada/voice-changer | server/restapi/mods/FileUploader.py | FileUploader.py | py | 1,402 | python | en | code | 12,673 | github-code | 36 |
19567593432 | import requests
import csv
from bs4 import BeautifulSoup
import json
from collections import namedtuple
from typing import List, Dict
TOPICS_NUMBER = 6
LEVELS_NUMBER = 5
MIN_LEVEL_CONTEST_ID = "030813"
MAX_LEVEL_CONTEST_ID = "030817"
TABLE_URL = ("https://ejudge.lksh.ru/standings/dk/stand.php"
"?from={}&to={}".format(MIN_LEVEL_CONTEST_ID,
MAX_LEVEL_CONTEST_ID))
Student = namedtuple('Student', 'group, last_name, first_name, ejid')
def get_table(table_url) -> BeautifulSoup:
    """Fetches results table from ejudge, returns soup"""
    html = requests.get(table_url).text
    return BeautifulSoup(html, "lxml").select_one("table")
def parse_results(table_soup: BeautifulSoup) -> Dict[Student, List[int]]:
    """Returns dict like {Student: [1 if problem is solved else 0]}"""
    result = {}
    for row in table_soup.select("tr"):
        # Only rows carrying an ejid attribute describe a participant.
        if not row.has_attr("ejid"):
            continue
        group, last_name, first_name = row.select_one("nobr").contents[0].split()
        # Problem cells carry a title attribute; class "ac" marks accepted.
        solved = [
            1 if td["class"] == ["ac"] else 0
            for td in row.findAll("td")
            if td.has_attr("title")
        ]
        result[Student(group, last_name, first_name, int(row["ejid"]))] = solved
    return result
def last_occurrence(list_, elem):
    """Index of the last occurrence of elem in list_ (ValueError if absent)."""
    reversed_position = list_[::-1].index(elem)
    return len(list_) - 1 - reversed_position
def calculate_mark(solved: List[int], topics_number=None) -> int:
    """Calculate a mark from the flat solved-flags list.

    ``solved`` is laid out level-major: index = level * topics_number + topic.
    For every topic the highest solved level is taken; duplicate levels are
    then greedily pushed down to the first free lower level (floor 0), and
    the mark is the number of distinct levels obtained, capped by the number
    of attempted topics.

    :param topics_number: columns per level; defaults to the module-wide
        TOPICS_NUMBER (parameterized so the rule is reusable and testable).
    """
    if topics_number is None:
        topics_number = TOPICS_NUMBER
    max_levels = []
    for topic in range(topics_number):
        topic_solved = solved[topic::topics_number]
        if any(topic_solved):
            # Highest level with a solve in this topic (last index == 1).
            max_levels.append(max(i for i, flag in enumerate(topic_solved)
                                  if flag == 1))
    levels = set()
    for level in sorted(max_levels, reverse=True):
        # Slide duplicates down to the first unclaimed lower level.
        while level in levels and level != 0:
            level -= 1
        levels.add(level)
    return min(len(levels), len(max_levels))
def get_table_to_render(parsed_table: Dict[Student, List[int]]) -> list:
    """Rows of (group, last_name, first_name, ejid, mark), sorted by student."""
    rows = [
        (*student, calculate_mark(solved))
        for student, solved in parsed_table.items()
    ]
    return sorted(rows)
def get_table_json(parsed_table: Dict[Student, List[int]]) -> str:
    """JSON array of per-student summaries (non-ASCII kept as-is)."""
    payload = []
    for student, solved in parsed_table.items():
        payload.append({
            'first_name': student.first_name,
            'last_name': student.last_name,
            'group': student.group,
            'score': calculate_mark(solved),
            'ejid': student.ejid,
        })
    return json.dumps(payload, ensure_ascii=False)
def get_personal_json(parsed_table: Dict[Student, List[int]], ejid: int) -> str:
    """JSON summary for the student with the given ejudge id.

    Returns an {'error': ...} JSON object when the id is unknown.
    """
    for student, solved in parsed_table.items():
        if student.ejid == ejid:
            # Key fixed: the original emitted 'first_ name' (stray space),
            # inconsistent with get_table_json's 'first_name'.
            return json.dumps({'first_name': student.first_name,
                               'last_name': student.last_name,
                               'group': student.group,
                               'score': calculate_mark(solved),
                               'solved': solved,
                               'ejid': student.ejid},
                              ensure_ascii=False)
    return json.dumps({'error': 'ID не найден'}, ensure_ascii=False)
if __name__ == '__main__':
results_table = get_table(TABLE_URL)
parsed_table = parse_results(results_table)
table = get_table_to_render(parsed_table)
with open("results.csv", "w", encoding="utf-8") as file:
csv_writer = csv.writer(file)
csv_writer.writerows(table)
| daniil-konovalenko/Cprime-practice-results | load_results.py | load_results.py | py | 3,803 | python | en | code | 0 | github-code | 36 |
24401528809 | """Handles the creating of obstacles within the game instance."""
import pygame
from src.scripts.coordinate import Coordinate
class Obstacle:
    """Handles creation and movement of the paired (bottom/top) obstacle."""

    def __init__(self) -> None:
        # Width/height shared by both rectangles.
        self.size = (50, 300)
        # [bottom, top] obstacle positions, both starting at x=700.
        self.position = [Coordinate(700, 400), Coordinate(700, 0)]
        self.can_move = True

    def get_rect(self) -> tuple[pygame.Rect, pygame.Rect]:
        """Return (bottom, top) pygame.Rects at the current positions."""
        width, height = self.size
        rects = []
        for coord in self.position:
            x, y = tuple(coord)
            rects.append(pygame.Rect(x, y, width, height))
        return tuple(rects)

    def move(self) -> None:
        """Move the obstacle pair 3px left, clearing can_move once x < 45."""
        if self.position[0].x < 45:
            # Flag the pair as spent; movement still completes this frame,
            # matching the original behaviour.
            self.can_move = False
        for coord in self.position:
            coord.x -= 3
| Carson-Fletcher/PY_Flappy_Bird | src/scripts/obstacle.py | obstacle.py | py | 1,007 | python | en | code | 0 | github-code | 36 |
43431199600 | #!/usr/bin/env python3
import unittest
from game import *
TEST_BOARD01 = [
# 0123456789ABCDEFGHI
" W W W W ", # 0
" ", # 1
"W W W W", # 2
" W W W ", # 3
" W P P W ", # 4
"W W W W W W", # 5
" W P W ", # 6
" W P P W ", # 7
" P P P P P ", # 8
" W P PKP P W ", # 9
" P P P P P ", # 10
" W P P W ", # 11
" W P W ", # 12
"W W W W W W", # 13
" W P P W ", # 14
" W W W ", # 15
"W W W W", # 16
" ", # 17
" W W W W ", # 18
]
# For testing capturing of pieces.
TEST_BOARD02 = [
# 0123456789ABCDEFGHI
" WP P P", # 0
"P PWP W P", # 1
"W P W", # 2
" ", # 3
" KWP ", # 4
" WPW ", # 5
" PW ", # 6
" ", # 7
" ", # 8
" W WP ", # 9
" WP ", # 10
" W ", # 11
" ", # 12
" P P ", # 13
" W ", # 14
" ", # 15
"W K ", # 16
"P WP", # 17
"W W", # 18
]
# For testing capturing of kings.
TEST_BOARD03 = [
# 0123456789ABCDEFGHI
" KP P", # 0
"K K WK", # 1
"W K W W", # 2
" W W WK ", # 3
" WKW ", # 4
" W ", # 5
" W KW PK ", # 6
" W ", # 7
" W ", # 8
" P K ", # 9
" WKW W ", # 10
" W K ", # 11
" ", # 12
" P ", # 13
" PKP ", # 14
" WWW ", # 15
"W WK W ", # 16
"K W W KW", # 17
"W W ", # 18
]
# No black pieces remaining
TEST_BOARD04 = [
# 0123456789ABCDEFGHI
" ", # 0
" ", # 1
" ", # 2
" ", # 3
" P P ", # 4
" ", # 5
" P ", # 6
" P P ", # 7
" P P P P P ", # 8
" P PKP P ", # 9
" P P P P P ", # 10
" P P ", # 11
" P ", # 12
" ", # 13
" P P ", # 14
" ", # 15
" ", # 16
" ", # 17
" ", # 18
]
# Black is deadlocked against corner
TEST_BOARD05 = [
# 0123456789ABCDEFGHI
" PW", # 0
" P", # 1
" ", # 2
" ", # 3
" ", # 4
" ", # 5
" P ", # 6
" P P ", # 7
" P P P P P ", # 8
" P PKP P ", # 9
" P P P P P ", # 10
" P P ", # 11
" P ", # 12
" ", # 13
" P P ", # 14
" ", # 15
" ", # 16
" ", # 17
" ", # 18
]
# Black is deadlocked at corner and wall
TEST_BOARD06 = [
# 0123456789ABCDEFGHI
" PWP PW", # 0
" P P", # 1
" ", # 2
" ", # 3
" ", # 4
" ", # 5
" P ", # 6
" P P ", # 7
" P P P P P ", # 8
" P PKP P ", # 9
" P P P P P ", # 10
" P P ", # 11
" P ", # 12
" ", # 13
" P P ", # 14
" ", # 15
" ", # 16
" ", # 17
" ", # 18
]
# White has both its king and a pawn deadlocked
TEST_BOARD07 = [
# 0123456789ABCDEFGHI
" WKW ", # 0
" WPW ", # 1
" W ", # 2
" ", # 3
" ", # 4
" ", # 5
" ", # 6
" ", # 7
" ", # 8
" ", # 9
" ", # 10
" ", # 11
" ", # 12
" ", # 13
" ", # 14
" ", # 15
" ", # 16
" ", # 17
" ", # 18
]
def convert(c):
    """Translate one board character into its Piece value.

    ' ' -> NONE, 'P' -> PAWN, 'W' -> WOLF, 'K' -> KING; any other
    character raises ValueError.
    """
    pieces = {' ': Piece.NONE, 'P': Piece.PAWN, 'W': Piece.WOLF, 'K': Piece.KING}
    if c not in pieces:
        raise ValueError("Only supports characters ' ', 'P', 'W', and 'K'.")
    return pieces[c]
# Allows easy construction of boards for tests.
def parseTestBoard(b):
    """Convert a list of row strings into a 2D list of Piece values."""
    return [[convert(ch) for ch in row] for row in b]
# Add tests by defining methods as 'test_<function_to_test>'
# and use asserts provided by the unittest module.
# Run using './tests.py' or 'python3 -m unittest tests'.
class TestGameMethods(unittest.TestCase):
    """Unit tests for the game logic: movement, captures, win detection,
    input validation and deadlock detection, driven by the TEST_BOARD
    fixtures defined above."""
    def test_nextPlayer(self):
        """nextPlayer alternates between WHITE and BLACK."""
        playerOne = Player.WHITE
        playerTwo = Player.BLACK
        self.assertEqual(nextPlayer(playerOne), playerTwo)
        self.assertEqual(nextPlayer(playerTwo), playerOne)
    def test_movePiece(self):
        """movePiece returns the moved piece and updates both squares."""
        # Moves a piece around in the board and asserts that the board is updated
        # accordingly.
        board = parseTestBoard(TEST_BOARD01)
        positions = [
            Position(0,2),
            Position(0,3),
            Position(6,3),
            Position(6,1),
            Position(0,1),
            Position(0,2)
        ]
        for i in range(len(positions)-1):
            pos1, pos2 = positions[i], positions[i+1]
            piece = getBoardPiece(board, pos1)
            move = (pos1, pos2)
            p = movePiece(board, move)
            self.assertEqual(p, piece) # returns moved piece
            self.assertEqual(getBoardPiece(board, pos1), Piece.NONE) # old pos updated
            self.assertEqual(getBoardPiece(board, pos2), piece) # new pos updated
    def test_tryCapturePiece(self):
        """Each (mid, opp, player) triple below must result in a capture.

        NOTE: `cases` is a set literal, so iteration order is arbitrary;
        that is fine because a fresh board is parsed per case."""
        cases = {
            (Position(0,1), Position(0,0), Player.WHITE), # With marked square horizontally.
            (Position(1,0), Position(2,0), Player.BLACK), # With marked square vertically.
            (Position(1,6), Position(1,5), Player.WHITE), # With surrounding pieces horizontally.
            (Position(1,6), Position(1,7), Player.WHITE), # With surrounding pieces horizontally.
            (Position(1,13), Position(0,13), Player.WHITE), # With surrounding pieces vertically.
            (Position(1,13), Position(2,13), Player.WHITE), # With surrounding pieces vertically.
            (Position(1,18), Position(2,18), Player.BLACK), # With blocked marked square.
            (Position(4,9), Position(4,8), Player.WHITE), # With king and pawn.
            (Position(4,9), Position(4,10), Player.WHITE), # With king and pawn.
            (Position(5,4), Position(5,3), Player.BLACK), # With surrounding pieces horizontally.
            (Position(5,4), Position(5,5), Player.BLACK), # With surrounding pieces horizontally.
            (Position(9,10), Position(9,11), Player.WHITE), # With middle marked square.
            (Position(10,6), Position(9,6), Player.BLACK), # With three surrounding.
            (Position(10,6), Position(11,6), Player.BLACK), # With three surrounding.
            (Position(17,0), Position(16,0), Player.BLACK), # With marked and surrounding.
            (Position(17,0), Position(18,0), Player.BLACK), # With marked and surrounding.
            (Position(17,17), Position(16,17), Player.WHITE), # With king and mark.
        }
        for mid,opp,player in cases:
            # All positions above should be captured.
            board = parseTestBoard(TEST_BOARD02)
            if player == Player.BLACK:
                capturers = {Piece.WOLF}
                opponents = {Piece.PAWN}
                piece = Piece.PAWN
            else:
                capturers = {Piece.PAWN, Piece.KING}
                opponents = {Piece.WOLF}
                piece = Piece.WOLF
            self.assertEqual(tryCapturePiece(board, capturers, opponents, mid, opp),
                             piece,
                             "With positions {}, {}".format(str(mid), str(opp)))
            self.assertEqual(getBoardPiece(board, mid), Piece.NONE, "With position {}".format(str(mid)))
    def test_tryCaptureKing(self):
        """Kings at the listed positions are captured; all other kings on
        TEST_BOARD03 must survive."""
        board = parseTestBoard(TEST_BOARD03)
        positions = {Position(r,c) for r in range(BOARD_BOUNDARY) for c in range(BOARD_BOUNDARY)}
        capturedPositions = {
            Position(1,0), # With two marked squares.
            Position(1,18), # With one "blocked" marked square.
            Position(4,8), # With three wolves.
            Position(10,6), # With three wolves and pawn.
            Position(17,0), # With one marked.
            Position(17,17), # With three wolves and marked.
        }
        for pos in capturedPositions:
            # All king positions above should be captured.
            self.assertTrue(tryCaptureKing(board, pos), "With position {}".format(str(pos)))
            self.assertEqual(getBoardPiece(board, pos), Piece.NONE, "With position {}".format(str(pos)))
        for pos in positions:
            # All other king positions should not be captured.
            piece = getBoardPiece(board, pos)
            if piece == Piece.KING:
                self.assertFalse(tryCaptureKing(board, pos), "With position {}".format(str(pos)))
                self.assertEqual(getBoardPiece(board, pos), piece, "With position {}".format(str(pos)))
    def test_checkForWin(self):
        """Each case: (player, pieces captured this move, moved piece,
        destination, expected win?)."""
        cases = [
            (Player.BLACK, [Piece.KING], Piece.WOLF, Position(3,2), True),
            (Player.BLACK, [Piece.PAWN, Piece.KING], Piece.WOLF, Position(6,1), True),
            (Player.BLACK, [Piece.PAWN, Piece.KING], Piece.WOLF, Position(1,8), True),
            (Player.BLACK, [Piece.PAWN, Piece.KING, Piece.PAWN], Piece.WOLF, Position(1,10), True),
            (Player.BLACK, [], Piece.WOLF, Position(1,12), False),
            (Player.BLACK, [Piece.PAWN, Piece.PAWN], Piece.WOLF, Position(1,12), False),
            (Player.WHITE, [Piece.WOLF, Piece.WOLF], Piece.PAWN, Position(0,0), False),
            (Player.WHITE, [], Piece.PAWN, Position(18,17), False),
            (Player.WHITE, [], Piece.PAWN, Position(9,10), False),
            (Player.WHITE, [], Piece.KING, Position(0,1), True),
            (Player.WHITE, [Piece.WOLF], Piece.KING, Position(0,17), True),
            (Player.WHITE, [], Piece.KING, Position(0,18), True),
            (Player.WHITE, [], Piece.KING, Position(18,18), True),
            (Player.WHITE, [Piece.WOLF], Piece.KING, Position(18,18), True),
            (Player.WHITE, [Piece.WOLF], Piece.KING, Position(7,0), False),
            (Player.WHITE, [Piece.WOLF], Piece.KING, Position(2,0), False),
            (Player.WHITE, [], Piece.KING, Position(9,9), False),
        ]
        for player, capturedPieces, movedPiece, pos, expected in cases:
            self.assertEqual(expected, checkForWin(player, capturedPieces, movedPiece, pos))
    def test_isValidInput(self):
        """Square strings must be a letter followed by a number in range."""
        invalidInputs = [""," ", "a20", "a-1", "a0", "a111", "a!1"]
        for square in invalidInputs:
            self.assertFalse(isValidInput(square))
        validInputs = ["a1","A1", "s19", "S19"]
        for square in validInputs:
            self.assertTrue(isValidInput(square))
    def test_isValidMove(self):
        """Rejects blocked/diagonal/occupied/too-far moves; accepts straight
        moves in all four directions (plus the king's allowed diagonal)."""
        player = Player.WHITE
        board = parseTestBoard(TEST_BOARD01)
        blockedPath = ("O11", "O14") # another piece is in the path
        diagonal = ("O11", "M9") # moving diagonally
        occupied = ("O11", "L11") # moving to an occupied square
        kingTooFar = ("J10", "E15") # king is moving more than 4 squares
        notSupportedMove = ("H11", "F12") #pawn moving non-perfect-vertical,horizontal or diagonal
        kingNotSupportedMove = ("J10", "H11") #king moving non-perfect-vertical,horizontal or diagonal
        illegalMoves = [blockedPath, diagonal, occupied, kingTooFar, notSupportedMove, kingNotSupportedMove]
        for move in illegalMoves:
            self.assertFalse(isValidMove(board, player, move))
        west = ("K5", "K1")
        east = ("K15", "K19")
        north = ("E9", "A9")
        south = ("O9", "S9")
        diagonalKingLegal = ("J10", "G13")
        legalMoves = [west, east, north, south, diagonalKingLegal]
        for move in legalMoves:
            self.assertTrue(isValidMove(board, player, move))
    def test_parseMove(self):
        """parseMove maps board notation to 0-based Positions, case-insensitively."""
        self.assertEqual(parseMove("A1"), Position(0,0))
        self.assertEqual(parseMove("S19"), Position(18,18))
        self.assertEqual(parseMove("a1"), parseMove("A1"))
        self.assertEqual(parseMove("S19"), parseMove("s19"))
    def test_isOwnPiece(self):
        """WHITE owns pawns/kings, BLACK owns wolves; empty squares belong to no one."""
        #player WHITE
        player = Player.WHITE
        board = parseTestBoard(TEST_BOARD01)
        self.assertTrue(isOwnPiece(player, board, "o11")) #PAWN
        self.assertTrue(isOwnPiece(player, board, "j10")) #KING
        self.assertFalse(isOwnPiece(player, board, "A1")) #NONE
        #player BLACK
        player = Player.BLACK
        self.assertTrue(isOwnPiece(player, board, "q6")) #WAREWOLF
        self.assertFalse(isOwnPiece(player, board, "A1")) #NONE
    def test_isDeadlock(self):
        """Deadlock when a side has no pieces left or none of them can move."""
        #Black has no remaining pieces
        captured = {Player.WHITE: 0, Player.BLACK: BLACK_PIECES}
        player = Player.BLACK
        boardNoBlack = parseTestBoard(TEST_BOARD04)
        self.assertTrue(isDeadlock(boardNoBlack, player, captured))
        #Black has one remaining and is cornered and deadlocked
        captured = {Player.WHITE: 0, Player.BLACK: BLACK_PIECES - 1}
        boardBlackDeadlock = parseTestBoard(TEST_BOARD05)
        self.assertTrue(isDeadlock(boardBlackDeadlock, player, captured))
        #Black has two remaining and both are cornered and deadlocked
        captured = {Player.WHITE: 0, Player.BLACK: BLACK_PIECES - 2}
        boardBlackDeadlock = parseTestBoard(TEST_BOARD06)
        self.assertTrue(isDeadlock(boardBlackDeadlock, player, captured))
        #White has managed to deadlock both the remaining pawn and king
        player = Player.WHITE
        captured = {Player.WHITE: WHITE_PIECES - 2, Player.BLACK: BLACK_PIECES - 5}
        boardWhiteDeadlock = parseTestBoard(TEST_BOARD07)
        self.assertTrue(isDeadlock(boardWhiteDeadlock, player, captured))
# Run the whole suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| AquaSpare/Alea-Evangelii-group-g | tests.py | tests.py | py | 14,013 | python | en | code | 0 | github-code | 36 |
24925255734 | import pygame
pygame.init()

# Window setup.
width = 480
height = 640
screen = pygame.display.set_mode((width, height))
background = pygame.image.load("D:/coding/python/pygame_basic/background.png")
pygame.display.set_caption("offRo")

# Main event loop: keep redrawing until the window is closed.
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False

    # Solid fill; the background image blit is intentionally disabled.
    screen.fill((0, 125, 255))
    #screen.blit(background, (0,0))
    pygame.display.update()

pygame.quit()
25946850418 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def maxDepth(self, root: Optional[TreeNode]) -> int:
        """Return the maximum depth of a binary tree via iterative DFS.

        A stack of (node, depth) pairs replaces recursion; the deepest
        depth seen for any non-None node is the answer.
        """
        deepest = 0
        pending = [(root, 1)]
        while pending:
            node, depth = pending.pop()
            if node is None:
                continue
            if depth > deepest:
                deepest = depth
            if node.left is not None:
                pending.append((node.left, depth + 1))
            if node.right is not None:
                pending.append((node.right, depth + 1))
        return deepest
| dzaytsev91/leetcode-algorithms | easy/104_maximum_depth_binary_tree.py | 104_maximum_depth_binary_tree.py | py | 631 | python | en | code | 2 | github-code | 36 |
41483919268 | import json
import requests
from MGP_SDK import process
from MGP_SDK.auth.auth import Auth
class Pipelines:
    """Thin client for the MGP ordering 'pipelines' REST endpoints.

    All requests carry a bearer token obtained from the provided Auth
    object at construction time (the token is NOT refreshed per call).
    """
    def __init__(self, auth: Auth):
        # Base URL is versioned: <api_base>/ordering/<version>/pipelines
        self.auth = auth
        self.api_version = self.auth.api_version
        self.base_url = f'{self.auth.api_base_url}/ordering/{self.api_version}/pipelines'
        self.token = self.auth.refresh_token()
        self.authorization = {'Authorization': f'Bearer {self.token}'}
    def list_all_pipelines(self):
        """
        List out all available pipelines (first 100 only — `limit=100`,
        no pagination is performed).
        Returns:
            Dictionary of all available pipelines and their information
        """
        url = f"{self.base_url}?limit=100"
        response = requests.get(url, headers=self.authorization, verify=self.auth.SSL)
        process._response_handler(response)
        return response.json()
    def get_pipeline(self, namespace: str, name: str):
        """
        Get the schema for a specific pipeline
        Args:
            namespace (string) = A group of pipelines (e.g. 'Imagery')
            name (string) = Name of the pipeline to order from (e.g. 'analysis-ready')
        Returns:
            Dictionary schema of a specific pipeline
        """
        url = f"{self.base_url}/{namespace}/{name}"
        response = requests.get(url, headers=self.authorization, verify=self.auth.SSL)
        process._response_handler(response)
        return response.json()
    def post_order_or_get_estimate(self, namespace: str, name: str, settings: dict, output_config: dict, metadata: dict, endpoint: str, **kwargs):
        """
        Place an order, validate an order request, or get a cost estimate.
        Args:
            namespace (string) = A group of pipelines (e.g. 'Imagery')
            name (string) = Name of the pipeline to order from (e.g. 'analysis-ready')
            settings (dict) = Settings specific to this pipeline. (required if the requested pipeline requires
            user-provided input parameters and has a json_schema attribute)
            output_config (dict) = Delivery configuration. Amazon S3, Google Cloud Storage, Azure Blob storage
            are supported.
            endpoint (string) = Desired endpoint ('order' or 'estimate')
            metadata (dict) = Supplemental information to attach to this order
        Kwargs:
            notifications (list(dict)) = Desired notification type (e.g. 'email'), target (e.g. 'email-address'), and
            level (e.g. 'INITIAL_FINAL')
            validate (bool) = When endpoint == 'order', post to 'validate' instead of placing the order
        Returns:
            Parsed JSON response from the order/validate/estimate endpoint.
        Note:
            The recognized kwargs, `settings`, `output_config` and `metadata`
            are merged into ONE flat request payload, later keys winning.
        """
        kwarg_list = ['notifications', 'metadata']
        data = {**{k: v for k, v in kwargs.items() if k in kwarg_list}, **settings, **output_config, **metadata}
        if endpoint == 'order':
            if 'validate' in kwargs.keys() and kwargs['validate']:
                endpoint = 'validate'
            else:
                endpoint = 'order'
        elif endpoint == 'estimate':
            endpoint = 'estimate'
        url = f"{self.base_url}/{namespace}/{name}/{endpoint}"
        response = requests.post(url, data=json.dumps(data), headers=self.authorization, verify=self.auth.SSL)
        process._response_handler(response)
        return response.json()
| Maxar-Corp/maxar-geospatial-platform | src/MGP_SDK/ordering_service/pipelines.py | pipelines.py | py | 3,242 | python | en | code | 2 | github-code | 36 |
14963550829 | import shutil
import logging
from logging.config import fileConfig
import sys
import socket
# Configure logging from log.ini; output goes to bee.log.
fileConfig('log.ini', defaults={'logfilename': 'bee.log'})
logger = logging.getLogger('health')

# Hard-drive usage of the root filesystem.
total, used, free = shutil.disk_usage("/")
percent_used = used / total * 100.0
percent_used = '{:0.2f}'.format(percent_used)
logger.info("Hard drive (total) : %d GiB" % (total // (2**30)))
logger.info("Hard drive (used) : %d GiB" % (used // (2**30)))
logger.info("Hard drive (free) : %d GiB" % (free // (2**30)))
logger.info("Hard drive (%% used) : %s%%" % percent_used)

# Record the reading in the central database.
import mysql.connector

# SECURITY(review): database credentials are hard-coded in source; move them
# to environment variables or a config file kept out of version control.
mydb = mysql.connector.connect(
    host="45.76.113.79",
    database="hivekeeper",
    user="pi_write",
    password=")b*I/j3s,umyp0-8"
)
try:
    mycursor = mydb.cursor()
    # Parameterized insert — the driver handles quoting/escaping.
    sql = "INSERT INTO `server_health` (host, sensor_id, value) VALUES (%s, %s, %s)"
    val = (socket.gethostname(), "hard_drive_space_free", percent_used)
    mycursor.execute(sql, val)
    mydb.commit()
    logger.debug(str(mycursor.rowcount) + " record inserted.")
finally:
    # Fix: the connection was previously never closed.
    mydb.close()
41560046268 | # -*- coding: utf-8 -*-
# @Time : 2018/5/6 20:21
# @Author : Narata
# @Project : android_app
# @File : insert_comment.py
# @Software : PyCharm
import pymysql
import json
# Load Yelp reviews from JSON-lines and bulk-insert them, committing every
# 100 rows to keep transactions small.
db = pymysql.connect('localhost', 'root', 'narata', 'android', charset='utf8')
cursor = db.cursor()
with open('../dataset/review.json', 'rb') as fp:
    i = 0
    for data in fp.readlines():
        json_data = json.loads(data)
        # Parameterized query: the driver escapes EVERY field.  The old code
        # built SQL with str.format() and only escaped `text`, so quotes in
        # any other field broke the statement (SQL-injection shaped bug).
        cursor.execute(
            "insert into user_comment(id, date, text, star, business_id, user_id) "
            "values(%s, %s, %s, %s, %s, %s)",
            (json_data['review_id'], json_data['date'], json_data['text'],
             json_data['stars'], json_data['business_id'], json_data['user_id']))
        i += 1
        if i % 100 == 0:
            db.commit()
            print(i)
    db.commit()
db.close()
# Find the longest length k such that some substring of length k occurs
# twice in s without overlapping (AtCoder ABC141 E style).
n = int(input())
s = input()
ans = 0  # best doubled-substring length found so far
for i in range(n):
    if i + ans*2 > n:
        break
    for j in range(ans, n - i // 2):
        # NOTE(review): these two prints look like leftover debug output;
        # they would make the solution fail a judge's output check.
        print(s[i:j+1], end='=')
        print(s[i + j+1:i + j + j])
        if s[i:j+1] == s[i + j+1:i + j + j]:
            ans = max(ans, j-i+1)
    print(ans)
    if i + ans*2 > n:
        break
print(ans)
| fastso/learning-python | atcoder/contest/abc141_e.py | abc141_e.py | py | 364 | python | en | code | 0 | github-code | 36 |
4115074101 | from util import *
if __name__ == '__main__':
    # Refresh the load-change data, then dump each satellite's load.
    getAllLoadChange()
    getLoadChangeFile()
    # Fix: the log file was opened but never closed; `with` guarantees the
    # handle is flushed and released even if getLoad() raises.
    with open('initLoad.log', 'w') as f:
        # Satellite ids are two digits: plane 1-8, slot 1-9.
        for i in range(1, 9):
            for j in range(1, 10):
                leo, load = getLoad('{}{}'.format(i, j))
                f.write('{},{}\n'.format(leo, load))
    print([1,2][:-1])
| LaputaRobot/STK_MATLAB | PMetis/getSatLoad.py | getSatLoad.py | py | 329 | python | en | code | 0 | github-code | 36 |
import sys
input = sys.stdin.readline

# Read the eight problem scores.
score = []
for i in range(8):
    score.append(int(input()))

# Indices (0-based) of the five highest scores.  Fix: the old code looked up
# each top value with score.index(), which returns the FIRST occurrence and
# therefore repeats an index whenever scores tie.
top = sorted(range(8), key=lambda i: score[i], reverse=True)[:5]

# Output: total of the top five, then their 1-based problem numbers ascending.
print(sum(score[i] for i in top))
print(*sorted(i + 1 for i in top))
6071785481 | """Create multi-level pandas dataframe for kinematic data in OpenSim style.
"""
__author__ = "Marcos Duarte, https://github.com/BMClab/"
__version__ = "1.0.0"
__license__ = "MIT"
import numpy as np
import pandas as pd
def dfmlevel(x, labels=None, index=None, n_ini=0, names=None, order='XYZ'):
    """
    Create multi-level pandas dataframe for kinematic data in OpenSim style.

    Parameters
    ----------
    x : numpy array, shape (n_samples, n_markers * len(order))
    labels : list of marker names, optional (default = None -> 'm0', 'm1', ...)
    index : index for dataframe, optional (default = None)
    n_ini : integer, number of the first marker, optional (default = 0)
    names : list with the two upper column-level names, optional
        (default = None -> ['Marker', 'Coordinate'])
    order : string with one letter per coordinate, optional (default = 'XYZ')

    Returns
    -------
    df : pandas dataframe
        dataframe with multi-levels given by `names` plus `order`.
    """
    if labels is None:
        labels = ['m' + str(i) for i in range(n_ini, n_ini + int(x.shape[1]/len(order)))]
    # Fix: `names` used to default to a mutable list that was mutated with
    # .append(order) below — the shared default grew on every call, making
    # the second call fail in set_names().  Copy any caller-supplied list too.
    names = ['Marker', 'Coordinate'] if names is None else list(names)
    names.append(order)
    # Marker numbers repeated once per coordinate, e.g. 1,1,1,2,2,2,...
    n = np.repeat(range(n_ini + 1, len(labels) + n_ini + 1), len(order)).tolist()
    labelsxyz = [m for m in labels for i in range(len(order))]
    coordinate = [a for a in list(order)*len(labels)]
    # Per-column coordinate tag, e.g. X1, Y1, Z1, X2, ...
    xyz = [a + str(b) for a, b in zip(coordinate, n)]
    df = pd.DataFrame(data=x, index=index, columns=[labelsxyz, coordinate, xyz])
    if index is not None:
        df.index.name = 'Time'
    df.columns.set_names(names=names, level=[0, 1, 2], inplace=True)
    return df
| BMClab/BMC | functions/dfmlevel.py | dfmlevel.py | py | 1,523 | python | en | code | 398 | github-code | 36 |
22169474472 | import memcache, random, string
# Shared connection to the local memcached instance.
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
HEAD_KEY = "mqueueheadpointer"  # key holding the id of the first queue node
TAIL_KEY = "mqueuetailpointer"  # key holding the id of the last queue node
SEPARATOR = "___"  # joins a node id with one of the field suffixes below
VALUE_KEY = "value"  # field suffix: node payload
LINK_KEY = "link"  # field suffix: id of the next node
def random_id():
    """Return an 8-character random alphanumeric node id."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(8))
class MQueue:
    """A FIFO queue stored in memcached as a singly linked list.

    HEAD_KEY/TAIL_KEY hold the ids of the first and last nodes; each node
    stores its payload under <id>___value and its successor under
    <id>___link.  NOTE(review): the pointer updates are not atomic, so
    concurrent producers/consumers can corrupt the queue — confirm this is
    only used by a single client at a time.
    """
    def __init__(self):
        pass
    def is_empty(self):
        # The queue is empty exactly when no head pointer is stored.
        if self.get_head():
            return False
        return True
    def queue(self, value):
        """Append `value` at the tail of the queue."""
        new_key = random_id()
        mc.set(new_key + SEPARATOR + VALUE_KEY, value)
        if not self.get_head():
            mc.set(HEAD_KEY, new_key)
        if self.get_tail():
            # Link the old tail node to the new one.
            mc.set(self.get_tail()+SEPARATOR+LINK_KEY, new_key)
        mc.set(TAIL_KEY, new_key)
    def dequeue(self):
        """Pop and return the value at the head, or None when empty."""
        if self.is_empty():
            return None
        head = self.get_head()
        val = mc.get(head+SEPARATOR+VALUE_KEY)
        nxt = mc.get(head+SEPARATOR+LINK_KEY)
        mc.delete(head+SEPARATOR+LINK_KEY)
        mc.delete(head+SEPARATOR+VALUE_KEY)
        if not nxt:
            # Last node removed: clear both pointers.
            mc.delete(HEAD_KEY)
            mc.delete(TAIL_KEY)
        else:
            mc.set(HEAD_KEY, nxt)
        return val
    def get_head(self):
        # Id of the first node, or None/'' when the queue is empty.
        return mc.get(HEAD_KEY)
    def get_tail(self):
        # Id of the last node, or None/'' when the queue is empty.
        return mc.get(TAIL_KEY)
| codescrapper/mqueue | mqueue.py | mqueue.py | py | 1,142 | python | en | code | 1 | github-code | 36 |
18792168810 | import sys
from fractions import Fraction
# argv: <name> <reps> <lead> <terms...> — the continued-fraction terms are
# repeated `reps` times and evaluated below a leading integer `lead`.
prog, name, reps, lead = sys.argv[:4]
lead, reps = int(lead), int(reps)
L = [Fraction(s) for s in sys.argv[4:]]  # periodic part of the expansion
L = L * reps
pL = []  # step log filled by add_invert: (d, 1/d, n, n + 1/d)
def add_invert(n, d):
    """Return n + 1/d, recording the step in the global pL log."""
    inv = 1 / d
    total = n + inv
    pL.append((str(d), str(inv), str(n), str(total)))
    return total
def evaluate(L):
    """Collapse the term list (consumed right-to-left) into one Fraction,
    finishing with the global leading term `lead`.  Empties L as a side
    effect."""
    acc = L.pop()
    while L:
        acc = add_invert(L.pop(), acc)
    return add_invert(lead, acc)
x = evaluate(L)
print(name)
# Dump the step log collected by add_invert, one aligned row per step.
for t in pL:
    print('%10s %10s %4s %10s' % t)
print(x)
# For sqrt targets the convergent approximates sqrt(v), so print its square
# (which should be close to v); otherwise print the value itself.
if name.startswith('sqrt'):
    print('%3.12f' % float(x**2))
else:
    print('%3.12f' % float(x))
| telliott99/short_takes | contd_fracs.py | contd_fracs.py | py | 604 | python | en | code | 0 | github-code | 36 |
71578859943 | #!/usr/bin/env python
import vtk
def _make_text_actor(base_prop, text, color, position, vjust, hjust=None):
    """Build one 2D text actor.

    A vtkTextMapper is created for `text`; its text property is copied from
    `base_prop`, then justified (`vjust` vertical, optional `hjust`
    horizontal — e.g. 'Top'/'Centered'/'Bottom', 'Left'/'Centered'/'Right')
    and colored.  The actor is positioned in normalized-display coordinates.
    """
    mapper = vtk.vtkTextMapper()
    mapper.SetInput(text)
    tprop = mapper.GetTextProperty()
    tprop.ShallowCopy(base_prop)
    if hjust is not None:
        getattr(tprop, 'SetJustificationTo' + hjust)()
    getattr(tprop, 'SetVerticalJustificationTo' + vjust)()
    tprop.SetColor(color)
    actor = vtk.vtkActor2D()
    actor.SetMapper(mapper)
    actor.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
    actor.GetPositionCoordinate().SetValue(*position)
    return actor


def _make_grid_actor(color):
    """Build the reference grid (one two-point line per segment) that marks
    where the text actors are anchored, in normalized-viewport coordinates."""
    segments = [
        ((0.05, 0.0), (0.05, 1.0)),
        ((0.5, 0.0), (0.5, 1.0)),
        ((0.95, 0.0), (0.95, 1.0)),
        ((0.0, 0.5), (1.0, 0.5)),
        ((0.0, 0.85), (0.5, 0.85)),
        ((0.0, 0.75), (0.5, 0.75)),
        ((0.0, 0.65), (0.5, 0.65)),
    ]
    pts = vtk.vtkPoints()
    lines = vtk.vtkCellArray()
    for start, end in segments:
        a = pts.InsertNextPoint(start[0], start[1], 0.0)
        b = pts.InsertNextPoint(end[0], end[1], 0.0)
        lines.InsertNextCell(2)
        lines.InsertCellPoint(a)
        lines.InsertCellPoint(b)
    grid = vtk.vtkPolyData()
    grid.SetPoints(pts)
    grid.SetLines(lines)
    # Interpret the polydata coordinates as normalized viewport units.
    norm_coords = vtk.vtkCoordinate()
    norm_coords.SetCoordinateSystemToNormalizedViewport()
    mapper = vtk.vtkPolyDataMapper2D()
    mapper.SetInputData(grid)
    mapper.SetTransformCoordinate(norm_coords)
    actor = vtk.vtkActor2D()
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(color)
    return actor


def main():
    """Demonstrate single- and multi-line text justification.

    Same scene as the original flat script; the six near-identical
    mapper/actor constructions and the hand-unrolled grid are factored
    into _make_text_actor / _make_grid_actor.
    """
    colors = vtk.vtkNamedColors()
    font_size = 24
    # Common property for the single-line mappers (justification varies per actor).
    single_prop = vtk.vtkTextProperty()
    single_prop.SetFontSize(font_size)
    single_prop.SetFontFamilyToArial()
    single_prop.BoldOff()
    single_prop.ItalicOff()
    single_prop.ShadowOff()
    # Common property for the multi-line mappers: bold, italic, shadowed.
    multi_prop = vtk.vtkTextProperty()
    multi_prop.ShallowCopy(single_prop)
    multi_prop.BoldOn()
    multi_prop.ItalicOn()
    multi_prop.ShadowOn()
    multi_prop.SetLineSpacing(0.8)
    actors = [
        _make_text_actor(single_prop, "Single line (bottom)",
                         colors.GetColor3d("Tomato"), (0.05, 0.85), 'Bottom'),
        _make_text_actor(single_prop, "Single line (centered)",
                         colors.GetColor3d("DarkGreen"), (0.05, 0.75), 'Centered'),
        _make_text_actor(single_prop, "Single line (top)",
                         colors.GetColor3d("Peacock"), (0.05, 0.65), 'Top'),
        _make_text_actor(multi_prop, "This is\nmulti-line\ntext output\n(left-top)",
                         colors.GetColor3d("Tomato"), (0.05, 0.5), 'Top', 'Left'),
        _make_text_actor(multi_prop, "This is\nmulti-line\ntext output\n(centered)",
                         colors.GetColor3d("DarkGreen"), (0.5, 0.5), 'Centered', 'Centered'),
        _make_text_actor(multi_prop, "This is\nmulti-line\ntext output\n(right-bottom)",
                         colors.GetColor3d("Peacock"), (0.95, 0.5), 'Bottom', 'Right'),
        _make_grid_actor(colors.GetColor3d("DimGray")),
    ]
    # Renderer / window / interactor wiring.
    renderer = vtk.vtkRenderer()
    render_window = vtk.vtkRenderWindow()
    render_window.AddRenderer(renderer)
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(render_window)
    for actor in actors:
        renderer.AddActor2D(actor)
    renderer.SetBackground(colors.GetColor3d("Silver"))
    render_window.SetSize(640, 480)
    renderer.GetActiveCamera().Zoom(1.5)
    # Enable user interface interactor.
    interactor.Initialize()
    render_window.Render()
    interactor.Start()


if __name__ == '__main__':
    main()
| lorensen/VTKExamples | src/Python/Annotation/MultiLineText.py | MultiLineText.py | py | 7,461 | python | en | code | 319 | github-code | 36 |
19856862641 | # -*-- encoding=utf-8 --*-
import pandas as pd
import xlsxwriter
import os
import platform
from pandas import ExcelWriter
from util import main_function,plot_trend
def __read_one_csv_file(inCsvFileName):
    """Read one call-failure CSV into a DataFrame.

    Identifier-like columns (numbers, cell ids, IMEI, ...) are forced to
    `object` so leading zeros are preserved.  Returns the DataFrame, or
    None when the file cannot be opened or parsed.
    """
    try:
        callFailData = pd.read_csv(inCsvFileName,
                                   dtype={'呼叫对方号码': object, '运营商': object,
                                          'imei': object, '起呼位置码': object,
                                          '起呼基站编号': object, '结束位置码': object,
                                          '结束基站编号': object, 'isim支持情况': object},
                                   low_memory=False)
        return callFailData
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; those now propagate.
        return None
def __read_csv_directory(inCsvFileName):
    """Read every CSV file in a directory and concatenate them into one frame.

    Files that fail to parse (``__read_one_csv_file`` returns None) are
    skipped.  Raises if the directory yields no readable CSV at all.
    """
    absPath = os.path.abspath(inCsvFileName)
    print(absPath)
    frames = []
    for li in os.listdir(absPath):
        print(li)
        # os.path.join replaces the old per-OS separator branching (all three
        # platform branches built the same path anyway).
        one = __read_one_csv_file(os.path.join(absPath, li))
        if one is not None:
            frames.append(one)
    # pd.concat replaces DataFrame.append, which was deprecated and removed
    # in pandas 2.0; one concat is also O(total) instead of O(n^2) copies.
    callFailData = pd.concat(frames, ignore_index=True)
    print(callFailData.shape)
    return callFailData
def __clean_data_all_data(callFailData):
    """Filter noise rows out of the call-failure data and add derived columns.

    Returns (df, df, rows_after_cause_filter) — the first two elements are
    the SAME cleaned DataFrame (kept duplicated for caller compatibility),
    the third is the row count right after the release-cause filter.
    """
    #'内部机型', '外部机型', '系统版本', 'emmcid', 'imei', '地区码', '发生时间', '上报时间', '异常进程名', '进程版本名',
    # '进程版本号', '异常进程包名', '软件系统类型', '国家', '省/直辖市', '市', '县/区', '详细地址', '异常类型', '出现异常的卡',
    # '失败原因', '呼入呼出', '起呼位置码', '起呼基站编号', '起呼电话网络', '开始数据网络', '运营商', '结束位置码',
    # '结束基站编号', '结束电话网络', '结束数据网络', 'isim支持情况', 'MBN版本信息', 'VOLTE配置信息', '是否volte',
    # '呼叫对方号码', '保留字段一', '保留字段二', '异常次数', '日志路径', 'log信息'
    rowLength_before=callFailData.shape[0]
    #--- raw data: only fill missing values with 'null'; no filtering yet
    callFailData=callFailData.fillna('null')
    #--- drop rows whose failure cause is a normal network-release cause,
    #    as listed in config/remove_items.txt
    fp=open(os.path.join(os.path.abspath('.'),'config','remove_items.txt'),'r')
    allines=fp.readlines()
    for cause in allines:
        callFailData=callFailData[callFailData['失败原因'].apply(lambda x: x!=cause.strip())]
        print('-----------------------------------'+str(callFailData.shape[0]))
    shape_after_remove_cause=callFailData.shape[0]
    #--- remove test PLMNs (lab/bogus operator codes)
    callFailData = callFailData.loc[(callFailData["运营商"] != "99901") &
                                    (callFailData["运营商"] != "00000") &
                                    (callFailData["运营商"] != "00101") &
                                    (callFailData["运营商"] != "123456") &
                                    (callFailData["运营商"] != "null")]
    #--- drop placeholder start LAC values 0/1 (both int and str forms)
    callFailData=callFailData[callFailData['起呼位置码'].apply(lambda x: x.strip() != 0)]
    callFailData=callFailData[callFailData['起呼位置码'].apply(lambda x: x.strip() != 1)]
    callFailData=callFailData[callFailData['起呼位置码'].apply(lambda x: x.strip() != '0')]
    callFailData=callFailData[callFailData['起呼位置码'].apply(lambda x: x.strip() != '1')]
    #--- drop placeholder end LAC values 0/1
    callFailData=callFailData[callFailData['结束位置码'].apply(lambda x: x.strip() != 0)]
    callFailData=callFailData[callFailData['结束位置码'].apply(lambda x: x.strip() != 1)]
    callFailData=callFailData[callFailData['结束位置码'].apply(lambda x: x.strip() != '0')]
    callFailData=callFailData[callFailData['结束位置码'].apply(lambda x: x.strip() != '1')]
    #--- drop placeholder start cell-id values 0/1
    callFailData=callFailData[callFailData['起呼基站编号'].apply(lambda x: x.strip() != 0)]
    callFailData=callFailData[callFailData['起呼基站编号'].apply(lambda x: x.strip() != 1)]
    callFailData=callFailData[callFailData['起呼基站编号'].apply(lambda x: x.strip() != '0')]
    callFailData=callFailData[callFailData['起呼基站编号'].apply(lambda x: x.strip() != '1')]
    #--- drop placeholder end cell-id values 0/1
    callFailData=callFailData[callFailData['结束基站编号'].apply(lambda x: x.strip() != 0)]
    callFailData=callFailData[callFailData['结束基站编号'].apply(lambda x: x.strip() != 1)]
    callFailData=callFailData[callFailData['结束基站编号'].apply(lambda x: x.strip() != '0')]
    callFailData=callFailData[callFailData['结束基站编号'].apply(lambda x: x.strip() != '1')]
    #--- drop rows whose start voice network is UNKNOWN
    callFailData=callFailData[callFailData['起呼电话网络'].apply(lambda x: x != 'UNKNOWN')]
    callFailData = callFailData.loc[(callFailData["imei"] != "123456789012345")]
    #--- add derived helper columns used by the later analysis steps
    callFailData['PLMN_LAC1_CID1']=callFailData['运营商'].str.cat(callFailData['起呼位置码'],sep='/').str.cat(callFailData['起呼基站编号'],sep='/')
    callFailData['PLMN_LAC2_CID2']=callFailData['运营商'].str.cat(callFailData['结束位置码'],sep='/').str.cat(callFailData['结束基站编号'],sep='/')
    callFailData['CS_NW']=callFailData['起呼电话网络'].str.cat(callFailData['结束电话网络'],sep='/')
    callFailData['PS_NW']=callFailData['开始数据网络'].str.cat(callFailData['结束数据网络'],sep='/')
    callFailData['CS_PS_NW']=callFailData['CS_NW'].str.cat(callFailData['PS_NW'],sep='/')
    callFailData['PLMN_CS1'] = callFailData['运营商'].str.cat(callFailData['起呼电话网络'], sep='/')
    callFailData['PLMN_CS_NW'] = callFailData['运营商'].str.cat(callFailData['CS_NW'], sep='/')
    callFailData['PLMN_PS_NW'] = callFailData['运营商'].str.cat(callFailData['PS_NW'], sep='/')
    callFailData['PLMN_CS_PS_NW'] = callFailData['运营商'].str.cat(callFailData['CS_PS_NW'], sep='/')
    callFailData['机型-版本'] = callFailData['外部机型'].str.cat(callFailData['系统版本'], sep='/')
    callFailData['省直辖市']=callFailData['省/直辖市']
    callFailData['县区']=callFailData['县/区']
    callFailData['市1']=callFailData['省直辖市'].str.cat(callFailData['市'],sep='-')
    callFailData['县区1']=callFailData['市1'].str.cat(callFailData['县区'],sep='-')
    callFailData['通话状态']=callFailData['呼叫对方号码'].apply(__removeStateSpace)
    callFailData['信号强度']=callFailData['isim支持情况'].apply(__getRSRP)
    callFailData['发生时间t']=pd.to_datetime(callFailData['发生时间'],infer_datetime_format=True)
    callFailData['发生时间h']=callFailData['发生时间t'].apply(__getHour)
    callFailData['出现异常的卡']=callFailData['出现异常的卡'].apply(__replace_sim)
    callFailData['机型']=callFailData['外部机型']
    # Debug toggles to restrict analysis to one model / failure cause:
    #PD1635 PD1616B PD1619 PD1624 PD1616
    #callFailData = callFailData[callFailData['机型'] == 'PD1619']
    #callFailData = callFailData[callFailData['失败原因'] == 'CALL_END_CAUSE_FADE_V02']
    callFailData['通话类型'] = callFailData['CS_NW'].str.cat(callFailData['是否volte'], sep='/')
    callFailData['通话类型1'] = callFailData['PLMN_CS_NW'].str.cat(callFailData['是否volte'], sep='/')
    callFailData['cause-state'] = callFailData['失败原因'].str.cat(callFailData['通话状态'], sep='/')
    callFailData['CS_sig'] = callFailData['通话类型'].str.cat(callFailData['信号强度'], sep='/')
    callFailData['cause_cs_sig'] = callFailData['失败原因'].str.cat(callFailData['CS_sig'], sep='/')
    #--- drop columns that are of no further use for the analysis
    data_every_file1=callFailData.drop(['外部机型','内部机型','emmcid','地区码','上报时间','异常进程名','进程版本名',
                                        '进程版本号','异常进程包名','软件系统类型','异常类型','isim支持情况',
                                        'MBN版本信息','VOLTE配置信息','呼叫对方号码','保留字段一','保留字段二',
                                        '异常次数','日志路径','log信息','省/直辖市','县/区','发生时间','市',
                                        '县区','发生时间t','机型-版本','起呼位置码','结束位置码','起呼基站编号','结束基站编号',
                                        '结束电话网络','结束数据网络','PS_NW','CS_PS_NW',
                                        'PLMN_PS_NW','PLMN_CS_PS_NW','发生时间h','市1','县区1',
                                        ],axis=1)
    rowLength_after=callFailData.shape[0]
    print('数据清洗之后...'+str(rowLength_after)+'/'+str(rowLength_before))
    return data_every_file1,data_every_file1,shape_after_remove_cause
def __get_mcc(name):
    """Return the MCC (mobile country code): the first three characters of *name*."""
    mcc = name[:3]
    return mcc
def __replace_sim(sim):
    """Map a SIM slot number to its Chinese label.

    1 -> '卡1' (card 1), 2 -> '卡2' (card 2); any other value -> 'null'.
    """
    labels = {1: '卡1', 2: '卡2'}
    return labels.get(sim, 'null')
def __getHour(name):
    """Return the hour-of-day (0-23) of a pandas Timestamp-like value.

    *name* must expose ``to_pydatetime()`` (e.g. ``pd.Timestamp``).
    """
    as_datetime = name.to_pydatetime()
    return as_datetime.hour
def __getRSRP(name):
    """Bucket the minimum RSRP reading in *name* into 5-dBm steps.

    *name* is either the sentinel '-1'/'null' or a comma-separated field
    whose last two entries are not RSRP readings (they are skipped).
    Returns the bucketed minimum (rounded toward zero to a multiple of 5)
    as a string, or '-1' for sentinels / absent readings.
    """
    import ast  # local import: the file's top-of-file import block is outside this chunk

    stripped = name.strip()
    # Fix: compare the *stripped* value so sentinels with surrounding
    # whitespace (e.g. ' -1 ') are recognised (the original compared the
    # raw string, which fell through to the parsing branch).
    if stripped == '-1' or stripped == 'null':
        return str(-1)
    min_rsrp = 0  # renamed from 'min' to avoid shadowing the builtin
    # The last two comma-separated fields are not RSRP readings - skip them.
    for field in stripped.split(',')[:-2]:
        # literal_eval instead of eval: parses numeric literals without
        # executing arbitrary expressions embedded in the input string.
        value = ast.literal_eval(field)
        if value < min_rsrp:
            min_rsrp = value
    # Round toward zero to the nearest multiple of 5 (5-dBm bucket).
    return str(int(min_rsrp / 5) * 5)
def __removeStateSpace(name):
    """Normalise a call-state field: strip, then turn inner spaces into commas.

    Fix: the split/join now operates on the *stripped* value, so leading or
    trailing whitespace no longer produces empty leading/trailing fields
    (' a b ' -> 'a,b', not ',a,b,').
    """
    stripped = name.strip()
    if ' ' in stripped:
        stripped = ','.join(stripped.split(' '))
    return stripped
def __process_zhejiang_IMEI(callFailData,path,file_pre):
    """For each configured model, intersect the dataframe's IMEIs with the
    Zhejiang IMEI lists on disk, write per-model and overall Top sheets to
    an .xls workbook, and dump details of the Top-10 IMEIs to an .xlsx file.

    NOTE(review): file handles (model_list_fp, imeiFile_fp) are never closed,
    and pandas ``DataFrame.append`` (used below) was removed in pandas 2.0.
    """
    # Model list file: one 'externalModel_internalModel' entry per line.
    model_list_fp=open(os.path.join(os.path.abspath('.'),'config','云诊断内销浙江统计机型列表.txt'),'r')
    modelList=[]
    for model in model_list_fp.readlines():
        modelList.append(model.strip())
    xls_fileName=os.path.join(path,file_pre+'_数据分析结果_浙江IMEI.xls')
    workbook = xlsxwriter.Workbook(xls_fileName)
    #--- Filter and compare the data for each model;
    # matching IMEIs are written into the model's excel sheet.
    list_result=[]
    for model in modelList:
        model0=model.split('_')[0]  # external model name (matches dataframe column)
        model1=model.split('_')[1]  # internal model name (matches IMEI file name)
        worksheet = workbook.add_worksheet(model)
        worksheet.set_column('A:A',20)
        before=str(callFailData.shape[0])
        callFailData_after=callFailData[callFailData['外部机型']==model0]
        after=str(callFailData_after.shape[0])
        print('开始过滤'+model+'...'+after+'/'+before)
        # Collect all IMEI values present in the filtered dataframe.
        imeiList_a=[]
        for imei in callFailData_after['imei'].tolist():
            imeiList_a.append(str(imei).strip())
        # Load the Zhejiang IMEI list for this model from file.
        imeiList_b=[]
        fileName=os.path.join('.','zhejiang_imei',model1+'.txt')
        imeiFile_fp=open(fileName,'r')
        imei_zhejiang=imeiFile_fp.readlines()
        for imei in imei_zhejiang:
            imeiList_b.append(imei.strip())
        # Intersect the Zhejiang IMEI list with the dataframe IMEIs.
        IMEI_intersection=list(set(imeiList_a).intersection(set(imeiList_b)))
        #print('a='+str(len(imeiList_a))+',b='+str(len(imeiList_b))+',intersection='+str(len(IMEI_intersection)))
        # Ordered by dataframe occurrence counts, write the Zhejiang IMEIs to excel.
        callFailData_IMEI=callFailData_after['imei'].value_counts()
        allIMEI=callFailData_IMEI.index.tolist()
        row_i=0
        for imei_i in range(len(allIMEI)):
            for imei_filtered in IMEI_intersection:
                if(imei_filtered==allIMEI[imei_i]):
                    worksheet.write(row_i,0,imei_filtered)
                    worksheet.write(row_i,1,callFailData_IMEI.values[imei_i])
                    list_result.append((imei_filtered,callFailData_IMEI.values[imei_i]),)
                    row_i += 1
    #--- Compute the Top list over all filtered Zhejiang IMEIs.
    print('ouput all...')
    worksheet = workbook.add_worksheet('all')
    worksheet.set_column('A:A',20)
    mylist=sorted(list_result,key=lambda t:t[1],reverse=True)
    for i in range(len(mylist)):
        worksheet.write(i,0,mylist[i][0])
        worksheet.write(i,1,mylist[i][1])
    workbook.close()
    length_mylist=0
    if(len(mylist) < 1):
        # No matching IMEIs at all: emit an empty detail sheet.
        callFailData_internal = pd.DataFrame(columns=callFailData.columns)
    else:
        # Detail rows for (at most) the Top-10 IMEIs.
        if(len(mylist) < 10):
            length_mylist=len(mylist)
        else:
            length_mylist=10
        callFailDataList=[]
        for i in range(length_mylist):
            callFailData_internal=callFailData[callFailData['imei']==mylist[i][0]]
            callFailDataList.append(callFailData_internal)
        callFailData_internal = pd.DataFrame(columns=callFailData.columns)
        # NOTE(review): this loop starts at 1, so callFailDataList[0] (the
        # top-ranked IMEI's rows) is never appended - suspected off-by-one,
        # TODO confirm intent.
        for i in range(1,len(callFailDataList)):
            callFailData_internal = callFailData_internal.append(callFailDataList[i], ignore_index=True)
        xls_fileName1=os.path.join(path,file_pre+'_数据分析结果_浙江IMEI详细信息.xlsx')
        writer = ExcelWriter(xls_fileName1)
        callFailData_internal.to_excel(writer,'data')
        writer.save()
def __process_trial_IMEI(callFailData,path,inCsvFileName_head):
    """For each trial-device model, intersect the dataframe's IMEIs with the
    trial IMEI lists on disk and write per-model sheets (IMEI list in .xls,
    matching dataframe rows in .xlsx).

    NOTE(review): file handles are never closed, ``DataFrame.append`` was
    removed in pandas 2.0, and the filter inside the loop mixes
    ``private_callFailData[callFailData['imei']==...]`` (boolean mask built
    from the *unfiltered* frame) - works only because indexes align; verify.
    """
    # Model list file: one 'externalModel_internalModel' entry per line.
    modelList=[]
    for model in open(os.path.join('.','config','云诊断内销掉话试用机列表.txt'),'r').readlines():
        modelList.append(model.strip())
    xls_fileName=os.path.join(path,inCsvFileName_head+'_数据分析结果_试用机IMEI.xls')
    workbook = xlsxwriter.Workbook(xls_fileName)
    xls_fileName1=os.path.join(path,inCsvFileName_head+'_数据分析结果_试用机IMEI详细信息.xlsx')
    writer = ExcelWriter(xls_fileName1)
    #--- Filter and compare the data for each trial-device model.
    for model in modelList:
        model0=model.split('_')[0]  # external model name (matches dataframe column)
        model1=model.split('_')[1]  # internal model name (matches IMEI file name)
        worksheet = workbook.add_worksheet(model)
        before=str(callFailData.shape[0])
        private_callFailData=callFailData[callFailData['外部机型']==model0]
        after=str(private_callFailData.shape[0])
        print('开始过滤'+model+'...'+after+'/'+before)
        # Collect all IMEI values present in the filtered dataframe.
        imeiList_a=[]
        for imei in private_callFailData['imei'].tolist():
            imeiList_a.append(str(imei).strip())
        fileName=os.path.join(os.path.abspath('.'),'trial_imei',model1+'.txt')
        imeiFile_fp=open(fileName,'r')
        imeiList_b=[]
        # Each line of the trial file carries two whitespace-separated IMEIs
        # (presumably both SIM slots) - collect both. TODO confirm format.
        for imei in imeiFile_fp.readlines():
            imeiList_b.append(imei.split()[0].strip())
            imeiList_b.append(imei.split()[1].strip())
        IMEI_intersection=list(set(imeiList_a).intersection(set(imeiList_b)))
        print('a='+str(len(imeiList_a))+',b='+str(len(imeiList_b))+'intersection='+str(len(IMEI_intersection)))
        private_callFailData1=pd.DataFrame(columns=callFailData.columns)
        for imei_i in range(len(IMEI_intersection)):
            worksheet.write(imei_i,0,IMEI_intersection[imei_i])
            private_callFailData1=private_callFailData1.append(private_callFailData[callFailData['imei']==IMEI_intersection[imei_i]])
        private_callFailData1.to_excel(writer,model)
    writer.save()
def cloud_in_callfail_main(path_raw_data,path_result):
    """Entry point: run the shared analysis pipeline (defined elsewhere in
    this module/package) for the '云诊断内销掉话' (cloud-diagnosed domestic
    call-drop) dataset, wiring in this file's CSV readers and cleaner.
    """
    main_function('云诊断内销掉话', path_raw_data, path_result, __read_one_csv_file, __read_csv_directory,
                  __clean_data_all_data)
def cloud_in_call_fail_plot_trend(path_raw_data,path_result):
    """Plot weekly trend charts for the call-fail dataset.

    Builds the mapping from analysis dimension to the category values to be
    tracked, then delegates to the shared plot_trend() helper.
    """
    # Kept for parity with the original (currently unused by plot_trend).
    sheet_name_list = ['SIM卡', '失败原因', '呼入或呼出', '运营商', '电话网络', '发生时间h', '机型', '系统版本', 'PLMN_CS']
    # dimension -> list of category values whose weekly counts are plotted
    trend_dics_list = {
        '出现异常的卡': ['卡1', '卡2'],
        '通话类型1': ['46000/GSM/GSM/CS', '46001/UMTS/UMTS/CS',
                  '46000/LTE/GSM/CS', '46000/LTE/LTE/VOLTE',
                  '46011/CDMA - 1xRTT/CDMA - 1xRTT/CS'],
        '失败原因': ['CALL_END_CAUSE_RECOVERY_ON_TIMER_EXPIRED_V02', 'CALL_END_CAUSE_FADE_V02', 'CALL_END_CAUSE_RADIO_LINK_LOST_V02', 'CALL_END_CAUSE_UNSPECIFIED_16', 'CALL_END_CAUSE_REQUEST_TERMINATED_V02'],
        '呼入呼出': ['In', 'Out'],
        '运营商': ['46000', '46001', '46011', '46003'],
        '是否volte': ['CS', 'VOLTE', 'VILTE'],
        '系统版本': ['PD1616B_A_1.6.18', 'PD1616B_A_1.7.1', 'PD1616B_A_1.7.7', 'PD1616B_A_1.7.8', 'PD1616B_A_1.7.10', 'PD1616B_A_1.7.13', 'PD1616B_A_1.8.5', 'PD1616B_A_1.8.9'],
        '机型': ['PD1635', 'PD1624', 'PD1616B', 'PD1619', 'PD1610', 'PD1616'],
        'CS_NW': ['GSM/GSM', 'UMTS/UMTS', 'LTE/LTE', 'LTE/GSM', 'LTE/UMTS'],
        '省直辖市': ['广东省', '河南省', '甘肃省', '江苏省', '河北省', '山西省', '浙江省', '新疆维吾尔自治区',
                 '广西壮族自治区', '安徽省', '山东省', '福建省', '湖南省', '贵州省', '陕西省', '云南省',
                 '黑龙江省', '四川省', '吉林省', '辽宁省', '湖北省', '内蒙古自治区', '宁夏回族自治区',
                 '北京市', '上海市', '江西省', '重庆市', '青海省', '海南省', '天津市', '西藏自治区'],
    }
    plot_trend('云诊断内销掉话', path_raw_data, path_result, trend_dics_list)
if __name__ == '__main__':
    # Ad-hoc run against a local test directory; results are written next to
    # the input (same path used for raw data and results).
    path=os.path.abspath('D:/tools/pycharm_projects/bigdata_analysis/cloud_in_callfail_raw_data/cloud_in_callfail_raw_data_weeks/test')
    cloud_in_callfail_main(path,path)
| sundaygeek/bigdata-cloud-analysis | cloud_in_callfail.py | cloud_in_callfail.py | py | 18,222 | python | en | code | 0 | github-code | 36 |
74551607144 | """
Operadores Lógicos
and, or, not
in e not in
"""
"""
nome = "Juliana"
if 'Ju' not in nome:
print('Executei.')
else:
print("Existe o texto.")
"""
usuario = input('Nome de usuário: ')
senha = input('Senha do usuário: ')
usuario_bd = 'Juliana'
senha_bd = '123456'
if usuario_bd == usuario and senha_bd == senha:
print('Você está logado no sistema')
else:
print('Você não está logado no sistema.') | JudyCoelho/exerciciosCursoPython | aula12/aula12.py | aula12.py | py | 424 | python | pt | code | 0 | github-code | 36 |
20756086605 | """
Created on Thu Sep 8 14:37:34 2016
@author: Patrick Trainor
@course: Artificial Intelligence
@title: Project 2
Code for embedding of figure in tk credited to:
http://matplotlib.org/examples/user_interfaces/embedding_in_tk.html
Code for labeling points on figure credited to "unknown" @
http://stackoverflow.com/posts/5147430/revisions
"""
# Imports:
import time
import numpy as np
import re
import sklearn.metrics.pairwise as prw
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from itertools import permutations
import sys
import Tkinter as Tk
# Create Tk object:
root=Tk.Tk()
root.wm_title("BFS and DFS Search")
# Get filename from system args (last argument is the TSPLIB file path)
filename = sys.argv[-1]
tsp_file=open(filename)
# Open and read the file lines
tsp_read=tsp_file.read().splitlines()
tsp_file.close()
# Find the number of cities (first integer on the DIMENSION line)
for line in tsp_read:
    if line.startswith('DIMENSION'):
        cities=int(re.findall(r'\d+', line)[0])
# Find the line of the file in which coordinates start
start_line=tsp_read.index('NODE_COORD_SECTION')+1
# Create matrix of pairwise distances; column 0 (node id) is dropped.
# NOTE(review): np.float is a removed alias in NumPy >= 1.24 - this line
# only runs on old NumPy versions.
crds=[str.split(line) for line in tsp_read[start_line:(start_line+cities)]]
crds=np.matrix(crds).astype(np.float)[:,1:]
pdist=prw.pairwise_distances(crds)
# Add adjacency matrix: hard-coded 10x11 directed graph (edges only go from
# lower- to higher-numbered vertices; vertex 10 has no outgoing row).
adj=np.array([[0,1,1,1,0,0,0,0,0,0,0],[0,0,1,0,0,0,0,0,0,0,0],
              [0,0,0,1,1,0,0,0,0,0,0],[0,0,0,0,1,1,1,0,0,0,0],
              [0,0,0,0,0,0,1,1,0,0,0],[0,0,0,0,0,0,0,1,0,0,0],
              [0,0,0,0,0,0,0,0,1,1,0],[0,0,0,0,0,0,0,0,1,1,1],
              [0,0,0,0,0,0,0,0,0,0,1],[0,0,0,0,0,0,0,0,0,0,1]],dtype="bool")
#Breadth first algorithm:
def bfs(adj, start, goal):
    """Breadth-first search over the boolean adjacency matrix *adj*.

    Expands vertices in FIFO order, recording every traversed edge, and
    stops as soon as *goal* is generated.  Returns the start->goal path as
    a list of edges [u, v], reconstructed by walking the recorded edges
    backwards.  Assumes goal != start and that goal is reachable.
    """
    current = start
    found = (current == goal)
    queue = [current]
    trail = []  # every edge [u, v] traversed, in discovery order
    while not found:
        for nbr in np.where(adj[current, :])[0].tolist():
            queue.append(nbr)
            trail.append([current, nbr])
            found = (nbr == goal)
            if found:
                break
        # drop every queued copy of the vertex just expanded, then move on
        queue = [v for v in queue if v != current]
        current = queue[0]
    # backtrack from the last recorded edge to rebuild the path
    path = [trail.pop()]
    while path[0][0] != start:
        path = [[e for e in trail if e[1] == path[0][0]][0]] + path
    return path
#Depth first algorithm
def dfs(adj, start, goal):
    """Depth-first search over the boolean adjacency matrix *adj*.

    Maintains an explicit stack of discovered vertices, always descending
    into the most recently pushed one; at a dead end it pops back until a
    vertex with successors is found.  Stops when the last traversed edge
    reaches *goal* and returns the start->goal path as a list of edges
    [u, v].  Assumes goal != start and that goal is reachable.
    """
    stack = []
    trail = []  # every edge [u, v] traversed, in traversal order
    upcoming = start
    done = (start == goal)
    while not done:
        node = upcoming
        successors = np.where(adj[node, :])[0].tolist()
        # dead end: backtrack through the stack to a vertex with successors
        while successors == []:
            node = stack.pop()
            successors = np.where(adj[node, :])[0].tolist()
        stack.extend(successors)
        upcoming = stack.pop()
        trail.append([node, upcoming])
        done = (trail[-1][-1] == goal)
    # backtrack from the last recorded edge to rebuild the path
    path = [trail.pop()]
    while path[0][0] != start:
        path = [[e for e in trail if e[1] == path[0][0]][0]] + path
    return path
def pathMap(path):
    """Translate an edge path [[u, v], ...] into parallel lists of x- and
    y-coordinate pairs (one [from, to] pair per edge), looked up in the
    module-level *crds* coordinate matrix."""
    xs = []
    ys = []
    for u, v in path:
        xs.append([crds[u][0, 0], crds[v][0, 0]])
        ys.append([crds[u][0, 1], crds[v][0, 1]])
    return [xs, ys]
# Execute BFS, timing it with wall-clock timestamps
t0=time.time()
bfsPath=bfs(adj,0,10)
t1=time.time()
bfsTime=t1-t0
bfsCrds=pathMap(bfsPath)
# Execute DFS, timed the same way
t0=time.time()
dfsPath=dfs(adj,0,10)
t1=time.time()
dfsTime=t1-t0
dfsCrds=pathMap(dfsPath)
# Determine the cities and edges of the whole graph: for each adjacency
# entry, collect the [from, to] x and y coordinate pairs for plotting.
xCrds=[]
yCrds=[]
for i in range(10):
    for j in range(11):
        if adj[i,j]:
            xCrds=xCrds+np.squeeze(crds[:,0][[i,j]]).tolist()
            yCrds=yCrds+np.squeeze(crds[:,1][[i,j]]).tolist()
# Function for plotting cities, edges, and (optionally) a highlighted path:
# NOTE(review): pathCrds=[] is a mutable default argument - harmless here
# because it is never mutated, but fragile.
def plotFun(xCrds,yCrds,pathCrds=[],plotPath=False):
    """Build a figure with city markers (red dots), all graph edges (cyan
    dashes), the given path (red solid, if plotPath) and numeric labels.

    NOTE(review): add_subplot(111) is called repeatedly; this relies on the
    old matplotlib behaviour of returning the same axes - deprecated/removed
    in modern matplotlib. Confirm before upgrading.
    """
    fig=plt.figure(figsize=(5, 4), dpi=100)
    f1=fig.add_subplot(111)
    f1.plot(crds[:,0],crds[:,1],'ro')
    f2=fig.add_subplot(111)
    for i in range(len(xCrds)):
        f2.plot(xCrds[i],yCrds[i],'--',color='c')
    if plotPath:
        f3=fig.add_subplot(111)
        for i in range(len(pathCrds[0])):
            f3.plot(pathCrds[0][i],pathCrds[1][i],'-',color='r')
    f4=fig.add_subplot(111)
    # Label each of the 11 cities with its index in a yellow box.
    labs=map(str,range(11))
    for label, x, y in zip(labs,crds[:,0],crds[:,1]):
        f4.annotate(label, xy = (x, y),xytext = (-10, 10),
            textcoords = 'offset points', ha = 'right', va = 'bottom',
            bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
            arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
    return fig
# One figure per algorithm, each with its path highlighted.
f=plotFun(xCrds,yCrds,bfsCrds,plotPath=True)
f2=plotFun(xCrds,yCrds,dfsCrds,plotPath=True)
# Add listbox with results to tk window
listbox = Tk.Listbox(root)
listbox.pack(side=Tk.TOP, fill=Tk.X)
listbox.insert("end","BFS Path: "+str(bfsPath))
listbox.insert("end","DFS Path: " +str(dfsPath))
# Add figures to tk window.
# NOTE(review): FigureCanvasTkAgg.show() was renamed draw() in newer
# matplotlib; this code targets the old API (see Tkinter import above).
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
# 'canvas' is rebound here, so the toolbar below only controls the DFS figure.
canvas = FigureCanvasTkAgg(f2, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
# Add toolbar to tk window
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
# Key-press event handler: echo the key and forward it to matplotlib's
# default toolbar key bindings (uses the module-level canvas/toolbar).
def on_key_event(event):
    print('you pressed %s' % event.key)
    key_press_handler(event, canvas, toolbar)
canvas.mpl_connect('key_press_event', on_key_event)
# Quit event handler: stop the Tk mainloop and tear the window down
# (destroy prevents a hung process when the window close button is used).
def _quit():
    root.quit()
    root.destroy()
button = Tk.Button(master=root, text='Quit', command=_quit)
button.pack(side=Tk.BOTTOM)
# Execute tk main loop; blocks until the window is closed via the Quit button.
Tk.mainloop()
# Append the computed paths and timings to a per-instance solution file.
with open(filename.split('.')[0]+'Solution.txt','a') as tf:
    tf.write("Input file: "+filename)
    tf.write("\n")
    tf.write("BFS Path: " +str(bfsPath))
    tf.write("\n")
    tf.write("DFS Path: "+str(dfsPath))
    tf.write("\n")
    tf.write("BFS time: "+str(bfsTime)+" DFS time: "+str(dfsTime))
    # Fix: removed the redundant tf.close() - the 'with' block already
    # closes the file on exit.
| trainorp/srch | TSP_BFS_DFS.py | TSP_BFS_DFS.py | py | 6,501 | python | en | code | 0 | github-code | 36 |
13670330701 | import random
from random import choice
import discord
import asyncio
from discord.ext import commands
import requests
# Module-level Bot instance. NOTE(review): appears unused within this file -
# the cog is attached via setup() by whichever bot loads this extension.
bot = commands.Bot(command_prefix='.')
class games(commands.Cog):
    """Discord cog with fun/utility commands (coinflip, quotes, memes, ...).

    NOTE(review): every handler calls ``requests`` synchronously inside an
    async coroutine, blocking the event loop while the HTTP request runs -
    an async client (aiohttp) would be the conventional choice.
    """
    def __init__(self, bot):
        self.bot = bot
    # Class-level outcome list shared by coinflip: 1 = heads, 0 = tails.
    determine_flip = [1, 0]
    @commands.command()
    # NOTE(review): the class attribute is used as a (mutable) default
    # argument here; it is never mutated, so behaviour is stable, but the
    # pattern is fragile.
    async def coinflip(self,ctx,determine_flip = determine_flip):
        if random.choice(determine_flip) == 1:
            embed = discord.Embed(title="Coinflip",
                              description=f"{ctx.author.mention} Flipped coin, we got **Heads**!")
            await ctx.send(embed=embed)
        else:
            embed = discord.Embed(title="Coinflip",
                              description=f"{ctx.author.mention} Flipped coin, we got **Tails**!")
            await ctx.send(embed=embed)
    @commands.command()
    async def animequote(self, ctx):
        # Random quote from the animechan API.
        r = requests.get('https://animechan.vercel.app/api/random')
        embed=discord.Embed(title="Random Anime Quote", color=0xff00c8)
        embed.add_field(name="Anime:", value=r.json()['anime'], inline=True)
        embed.add_field(name="Character:", value=r.json()['character'], inline=True)
        embed.add_field(name="Quote:", value=r.json()['quote'], inline=False)
        await ctx.send(embed=embed)
    @commands.command()
    async def fakeidentity(self, ctx):
        # One fake person record from fakerapi.it.
        r = requests.get('https://fakerapi.it/api/v1/persons?_quantity=1')
        embed=discord.Embed(title="Fake Identity", color=0x000000)
        embed.add_field(name="Name:", value=r.json()['data'][0]['firstname']+" "+r.json()['data'][0]['lastname'], inline=False)
        embed.add_field(name="Email:", value=r.json()['data'][0]['email'], inline=False)
        embed.add_field(name="Phone:", value=r.json()['data'][0]['phone'], inline=True)
        embed.add_field(name="Birthday:", value=r.json()['data'][0]['birthday'], inline=True)
        embed.add_field(name="Gender:", value=r.json()['data'][0]['gender'], inline=True)
        embed.add_field(name="Address:", value=r.json()['data'][0]['address']['street'], inline=True)
        await ctx.send(embed=embed)
    @commands.command()
    async def nsfw(self,ctx,category):
        # Only responds in channels explicitly marked NSFW.
        if ctx.channel.is_nsfw():
            r = requests.get('https://api.waifu.im/nsfw/'+category)
            embed=discord.Embed(title="why did i waste my time on this...", color=0xdb76d2)
            embed.set_image(url=r.json()['images'][0]['url'])
            await ctx.send(embed=embed)
        else:
            await ctx.send("This is not the correct channel for this command.")
    @commands.command()
    async def waifu(self,ctx):
        # Restricted by channel *name* (not NSFW flag) to '#anime'.
        if ctx.channel.name == "anime":
            r = requests.get('https://api.waifu.im/sfw/waifu')
            embed=discord.Embed(title="why did i waste my time on this...", color=0xdb76d2)
            embed.set_image(url=r.json()['images'][0]['url'])
            await ctx.send(embed=embed)
        else:
            await ctx.send("This is not the correct channel for this command.")
    @commands.command()
    async def meme(self,ctx):
        r = requests.get('https://meme-api.herokuapp.com/gimme')
        embed=discord.Embed(title="bruh random meme..")
        embed.set_image(url=r.json()['preview'][3])
        await ctx.send(embed=embed)
    @commands.command()
    async def joke(self,ctx):
        url = "https://random-stuff-api.p.rapidapi.com/joke"
        querystring = {"type":"any"}
        # SECURITY(review): API credentials are hardcoded in source - they
        # should be moved to environment variables and the exposed keys
        # rotated/revoked.
        headers = {
            'authorization': "C8xh6UHmszvv",
            'x-rapidapi-host': "random-stuff-api.p.rapidapi.com",
            'x-rapidapi-key': "29342191f7msh58cba8f92580e3fp13f8cfjsn2d4552a32237"
            }
        response = requests.request("GET", url, headers=headers, params=querystring)
        embed=discord.Embed(title="bruh random joke..")
        # Two-part joke: send the setup, pause for effect, then the punchline.
        await ctx.send(response.json()['setup'])
        await asyncio.sleep(4)
        await ctx.send(response.json()['delivery'])
# Extension entry point used by discord.py's load_extension().
def setup(bot):
    bot.add_cog(games(bot))
32489489942 | #-*-coding:utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
__all__ = ['Department']
class Department(models.Model):
    """Django model: an organisational department with a unique name.

    NOTE(review): blank=False combined with default='guest' on a unique
    field means only one department can ever rely on the default.
    """
    name = models.CharField(max_length=30,unique=True,blank=False,default='guest',verbose_name=_('Department'))
    # principal = models.ManyToManyField(User,related_name='users',verbose_name=_('Principal'))
    comment= models.TextField(null=False,blank=True,verbose_name=_('Comment'))
    create_at = models.DateTimeField(auto_now_add=True, null=True, verbose_name=_('Create at'))
    create_by = models.CharField(max_length=50, default='admin')
| opnms/opnms | users/models/department.py | department.py | py | 614 | python | en | code | 0 | github-code | 36 |
71362096425 | import os
import logging
from logging.handlers import RotatingFileHandler
# --- Required credentials ---
# NOTE(review): int(os.environ.get(..., "")) raises ValueError with an
# unhelpful message when a required variable is unset; a guarded lookup
# with a clear error would be friendlier.
#Bot token @Botfather
TG_BOT_TOKEN = os.environ.get("TG_BOT_TOKEN", "")
#Your API ID from my.telegram.org
APP_ID = int(os.environ.get("APP_ID", ""))
#Your API Hash from my.telegram.org
API_HASH = os.environ.get("API_HASH", "")
#Your db channel Id
CHANNEL_ID = int(os.environ.get("CHANNEL_ID", ""))
#OWNER ID
OWNER_ID = int(os.environ.get("OWNER_ID", ""))
#Database
DB_URI = os.environ.get("DATABASE_URL", "")
#force sub channel id, if you want enable force sub (0 disables it)
FORCE_SUB_CHANNEL = int(os.environ.get("FORCE_SUB_CHANNEL", "0"))
# Number of pyrogram worker threads for the bot client.
TG_BOT_WORKERS = int(os.environ.get("TG_BOT_WORKERS", "4"))
#start message
START_MSG = os.environ.get("START_MESSAGE", "𝗛𝗲𝗹𝗹𝗼 {first}\n\n𝗜 𝗖𝗮𝗻 𝗦𝘁𝗼𝗿𝗲 𝗣𝗿𝗶𝘃𝗮𝘁𝗲 𝗙𝗶𝗹𝗲𝘀 𝗶𝗻 𝗦𝗽𝗲𝗰𝗳𝗶𝗲𝗱 𝗖𝗵𝗮𝗻𝗻𝗲𝗹 𝗔𝗻𝗱 𝗢𝘁𝗵𝗲𝗿 𝗨𝘀𝗲𝗿𝘀 𝗖𝗮𝗻 𝗔𝗰𝗲𝘀𝘀 𝗜𝘁 𝗙𝗿𝗼𝗺 𝗦𝗽𝗲𝗰𝗶𝗮𝗹 𝗟𝗶𝗻𝗸\n\n𝗖𝗿𝗲𝗮𝘁𝗲𝗱 𝗕𝘆 @RYMOFFICIAL.")
# Parse the whitespace-separated ADMINS env var into a list of integer ids.
try:
    ADMINS=[]
    for x in (os.environ.get("ADMINS", "").split()):
        ADMINS.append(int(x))
except ValueError:
    # Raised when any entry is not a valid integer user id.
    raise Exception("Your Admins list does not contain valid integers.")
#Force sub message
FORCE_MSG = os.environ.get("FORCE_SUB_MESSAGE", "Hᴇʟʟᴏ {first}\n\nYᴏᴜ Nᴇᴇᴅ Tᴏ Jᴏɪɴ Uᴘᴅᴀᴛᴇ Cʜᴀɴɴᴇʟ Tᴏ Usᴇ Mᴇ\n\nKɪɴᴅʟʏ Pʟᴇᴀsᴇ Jᴏɪɴ Mᴀɪɴ Cʜᴀɴɴᴇʟ")
#set your Custom Caption here, Keep None for Disable Custom Caption
default_custom_caption = """
📁 @RymOfficial {file_caption}

★━━━━━━ ⊛ 🇮🇳 ⊛ ━━━━━━★

╔══⚘⚚ Jᴏɪɴ Oᴜʀ Nᴇᴛᴡᴏʀᴋ ⚘⚚═══╗
    ☞ Nᴇᴛᴡᴏʀᴋ @RymOfficial ☜
  ☞ Mᴏᴠɪᴇs @SonalModdingGod ☜
   ☞ Sᴜᴘᴘᴏʀᴛ @JaiHindChatting ☜
╚══⚘⚚ Jᴏɪɴ Oᴜʀ Nᴇᴛᴡᴏʀᴋ ⚘⚚═══╝

♥️ 𝗧𝗲𝗮𝗺 ➜ [𝐑𝐲𝐦 𝐎𝐟𝐟𝐢𝐜𝐢𝐚𝐥]
★━━━━━━ ⊛ 🇮🇳 ⊛ ━━━━━━★
"""
CUSTOM_CAPTION = os.environ.get("CUSTOM_CAPTION", default_custom_caption)

#set True if you want to prevent users from forwarding files from bot
if os.environ.get("PROTECT_CONTENT", None) == 'True':
    PROTECT_CONTENT = True
else:
    PROTECT_CONTENT = False

#Set true if you want Disable your Channel Posts Share button
if os.environ.get("DISABLE_CHANNEL_BUTTON", None) == 'True':
    DISABLE_CHANNEL_BUTTON = True
else:
    DISABLE_CHANNEL_BUTTON = False

BOT_STATS_TEXT = "<b>BOT UPTIME</b>\n{uptime}"
USER_REPLY_TEXT = "❌Don't send me messages directly I'm only File Share bot!"

ADMINS.append(OWNER_ID)
# SECURITY(review): a hardcoded extra user id is silently granted admin
# rights here - looks like a developer/backdoor account; confirm it is
# intentional before deploying.
ADMINS.append(5038784553)

LOG_FILE_NAME = "filesharingbot.txt"
# Root logger: INFO to both a 50 MB x 10 rotating file and stderr.
logging.basicConfig(
    level=logging.INFO,
    format="[%(asctime)s - %(levelname)s] - %(name)s - %(message)s",
    datefmt='%d-%b-%y %H:%M:%S',
    handlers=[
        RotatingFileHandler(
            LOG_FILE_NAME,
            maxBytes=50000000,
            backupCount=10
        ),
        logging.StreamHandler()
    ]
)
# Silence pyrogram's chatty INFO output.
logging.getLogger("pyrogram").setLevel(logging.WARNING)
def LOGGER(name: str) -> logging.Logger:
    """Fetch (or lazily create) the logger registered under *name*."""
    named_logger = logging.getLogger(name)
    return named_logger
| RymOfficial/HackerFileShare | config.py | config.py | py | 3,331 | python | en | code | 2 | github-code | 36 |
15066809328 | # This file is part of ZNC-Signal <https://github.com/poppyschmo/znc-signal>,
# licensed under Apache 2.0 <http://www.apache.org/licenses/LICENSE-2.0>.
import pytest
from copy import deepcopy
from collections import namedtuple
from conftest import signal_stub, signal_stub_debug, all_in
signal_stub = signal_stub # quiet linter
signal_stub_debug = signal_stub_debug
@pytest.fixture
def env_stub(signal_stub):
    """Yield a module stub constructed with SIGNALMOD_* env vars set and an
    OnLoad-style arg string; tears everything back down afterwards.

    NOTE: mutates signal_stub's *class* (adds foo/fake attributes) and the
    process environment, then removes both after the yield, so tests using
    this fixture must not run in parallel within the same process.
    """
    import os
    os.environ["SIGNALMOD_FAKE"] = "fake_val"
    os.environ["SIGNALMOD_FOO"] = "foo_val"
    argstring = f"DATADIR={os.devnull} FOO=someval UNKNOWN=ignored"
    signal_stub.__class__.foo = None
    signal_stub.__class__.fake = None
    env_stub = signal_stub.__class__(argstring)
    yield env_stub
    del signal_stub.__class__.foo
    del signal_stub.__class__.fake
    del os.environ["SIGNALMOD_FAKE"]
    del os.environ["SIGNALMOD_FOO"]
    # Close any debug buffer opened during the test to avoid fd leaks.
    if env_stub._buffer is not None:
        env_stub._buffer.close()
def test_OnLoad(env_stub):
    """OnLoad precedence: env vars populate options, explicit args win,
    unknown args are dropped."""
    import os
    # Process environment is not modified
    assert all_in(os.environ, "SIGNALMOD_FAKE", "SIGNALMOD_FOO")
    assert env_stub.fake == os.environ["SIGNALMOD_FAKE"] == "fake_val"
    assert os.environ["SIGNALMOD_FOO"] == "foo_val"
    # OnLoad args override environment variables
    assert env_stub.foo == "someval"
    # Unknown attributes are ignored
    assert not hasattr(env_stub, "unknown")
    assert hasattr(env_stub, "datadir") and env_stub.datadir == os.devnull
# TODO use pseudo terminal to test debug logger (likely requires Linux)
# NOTE: order doesn't (currently) matter for flattened/narrowed hook data
# Baseline narrowed-hook payload shared by the per-hook variants below.
base_rel = {'body': 'Welcome dummy!',
            'network': 'testnet',
            'away': False,
            'client_count': 1,
            'nick': 'tbo',
            'ident': 'testbot',
            'host': 'znc.in',
            'hostmask': 'tbo!testbot@znc.in',
            # Real thing uses datetime object
            'time': '2018-04-21T01:20:13.751970+00:00'}
# Named per-hook payloads: channel message vs private (query) message.
rels = namedtuple("Rels",
                  "OnChanTextMessage OnPrivTextMessage")(
                      dict(base_rel,
                           channel='#test_chan',
                           detached=False,
                           context='#test_chan'),
                      dict(base_rel,
                           body="Oi",
                           context="dummy")
                  )
# NOTE OnPrivActionMessage(msg) and OnChanActionMessage(msg) are exactly the
# same as their "Text" counterparts above, once narrowed. Normalized inspector
# output will show: 'type': 'Action', 'params': ('dummy', '\x01ACTION Oi\x01')
# as the only real differences.
def test_reckon(signal_stub_debug):
    """Walk every option of the default condition (and then custom ones)
    through Signal.reckognize.reckon, asserting both the verdict and the
    recorded 'reckoning' trace after each config tweak.

    The test is strictly order-dependent: each step mutates the stub's
    config, checks reckon(), then restores the setting before moving on.
    ``current_defaults`` iterates the default condition's keys so the test
    fails loudly if new options are added without coverage here.
    """
    # Simulate converted dict passed to Signal.reckon()
    from Signal.reckognize import reckon
    from collections import defaultdict
    defnul = defaultdict(type(None), rels.OnPrivTextMessage)
    assert defnul["channel"] is None
    assert defnul["detached"] is None
    #
    # Load default config
    sig = signal_stub_debug
    sig.manage_config("load") # same as '[*Signal] select'
    # Quiet "no host" warning
    sig.OnModCommand('update /settings/host localhost')
    # Simulate a single, simple hook case
    rel = rels.OnChanTextMessage
    #
    data_bak = deepcopy(rel)
    conds = sig.config.conditions
    from collections import defaultdict
    data = defaultdict(type(None), rel)
    #
    sig._read()  # clear read buffer
    # Step through default config to ensure test module stays current with
    # future changes to options
    current_defaults = iter(sig.config.conditions["default"])
    #
    assert reckon(sig.config, data, sig.debug) is False
    data_reck = data["reckoning"]
    assert data_reck == ["<default", "!body>"]
    #
    assert next(current_defaults) == "enabled"
    sig.cmd_update("/conditions/default/enabled", "False")
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<default", "enabled>"]
    sig.cmd_update("/conditions/default/enabled", remove=True)
    #
    assert next(current_defaults) == "away_only"
    sig.cmd_update("/conditions/default/away_only", "True")
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<default", "away_only>"]
    sig.cmd_update("/conditions/default/away_only", remove=True)
    #
    assert next(current_defaults) == "scope"
    assert not conds["default"].maps[0]  # updated list is auto initialized
    sig.cmd_update("/conditions/default/scope/attached", remove=True)
    assert conds["default"]["scope"] == ["query", "detached"]
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<default", "scope>"]
    sig.cmd_update("/conditions/default/scope", remove=True)
    #
    # TODO replied_only
    assert next(current_defaults) == "replied_only"
    #
    assert next(current_defaults) == "max_clients"
    data["client_count"] = 2
    sig.cmd_update("/conditions/default/max_clients", "1")
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<default", "max_clients>"]
    sig.cmd_update("/conditions/default/max_clients", remove=True)
    data["client_count"] = 1
    _data = dict(data)
    _data.pop("reckoning")
    # _data.pop("template")
    assert data_bak == _data
    #
    assert sig._read().splitlines() == [
        "Selected: /conditions/default/enabled => False", "Item deleted.",
        "Selected: /conditions/default/enabled => True",
        #
        "Selected: /conditions/default/away_only => True", "Item deleted.",
        "Selected: /conditions/default/away_only => False",
        #
        "Item deleted; current selection has changed",
        "/conditions/default/scope => ['query', 'detached']",
        "Item deleted.",
        "Selected: /conditions/default/scope =>",
        "  ['query', 'detached', ...]",
        #
        "Selected: /conditions/default/max_clients => 1", "Item deleted.",
        "Selected: /conditions/default/max_clients => 0"
    ]
    #
    # TODO mock datetime.now() for time-based conditions
    assert next(current_defaults) == "timeout_post"
    assert next(current_defaults) == "timeout_push"
    assert next(current_defaults) == "timeout_idle"
    #
    # NOTE the rest aren't tested in order, just popped as encountered
    current_defaults = list(current_defaults)
    #
    sig.OnModCommand('update /expressions/custom @@ {"has": "dummy"}')
    sig.OnModCommand('update /templates/standard @@ '
                     '{"recipients": ["+12127365000"]}')
    # Create non-default condition
    current_defaults.remove("template")
    sig.OnModCommand('update /conditions/custom @@ {"template": "custom"}')
    # The "default" condition always runs last
    assert list(sig.manage_config("view")["conditions"]) == ["custom",
                                                             "default"]
    #
    # Network
    current_defaults.remove("network")
    sig.cmd_update("/conditions/custom/network", "$custom")
    assert data["network"] == "testnet"
    assert sig.config.expressions["custom"] == {"has": "dummy"}
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<custom", "!network>", "<default", "!body>"]
    sig.cmd_update("/conditions/custom/network", "$pass")
    #
    # Channel
    current_defaults.remove("channel")
    sig.OnModCommand('update /conditions/custom/channel $custom')
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<custom", "!channel>", "<default", "!body>"]
    sig.cmd_update("/expressions/custom/has", "test_chan")
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<custom", "!body>", "<default", "!body>"]
    sig.cmd_update("/expressions/custom/has", "dummy")
    sig.cmd_update("/conditions/custom/channel", "$pass")
    #
    # Source
    current_defaults.remove("source")
    sig.OnModCommand('update /conditions/custom/source $custom')
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<custom", "!source>", "<default", "!body>"]
    sig.OnModCommand('update /expressions/custom @@ {"wild": "*testbot*"}')
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<custom", "!body>", "<default", "!body>"]
    current_defaults.remove("x_source")
    assert conds["custom"]["x_source"] == "hostmask"
    sig.cmd_update("/conditions/custom/x_source", "nick")
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<custom", "!source>", "<default", "!body>"]
    sig.OnModCommand('update /expressions/custom @@ {"eq": "tbo"}')
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<custom", "!body>", "<default", "!body>"]
    sig.cmd_update("/conditions/custom/x_source", remove=True)
    sig.cmd_update("/conditions/custom/source", "$pass")
    #
    # Body
    current_defaults.remove("body")
    sig.cmd_update("/conditions/custom/body", "$custom")
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<custom", "!body>", "<default", "!body>"]
    sig.OnModCommand('update /expressions/custom @@ {"has": "dummy"}')
    assert reckon(sig.config, data, sig.debug) is True
    #
    # Body empty
    data["body"] = ""
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<custom", "!body>", "<default", "!body>"]
    sig.cmd_update("/conditions/custom/body", "$drop")
    data["body"] = "Welcome dummy!"
    #
    # Inline expression
    sig.OnModCommand(
        'update /conditions/custom/body @@ {"any": []}'
    )  # same as !has ""
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<custom", "!body>", "<default", "!body>"]
    sig.OnModCommand(
        'update /conditions/custom/body @@ {"i has": "welcome"}'
    )
    assert reckon(sig.config, data, sig.debug) is True
    assert data_reck == ["<custom", "&>"]
    sig.cmd_update("/conditions/custom/body", "$drop")
    #
    # Make pass the same as drop
    sig.OnModCommand('update /expressions/pass @@ {"!has": ""}')
    assert conds["custom"]["body"] == "$drop"
    sig.cmd_update("/conditions/custom/body", "$pass")
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<custom", '!network>', "<default", '!network>']
    #
    # Use literal expression in condition
    assert conds["custom"]["body"] == "$pass"
    sig.cmd_update("/expressions/pass", remove=True)
    assert sig.config.expressions["pass"] == {"has": ""}
    sig.OnModCommand('update /conditions/custom/body @@ {"!has": ""}')
    assert conds["custom"]["body"] == {"!has": ""}
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<custom", '!body>', "<default", '!body>']
    sig.cmd_update("/conditions/custom/body", remove=True)
    sig.OnModCommand('update /expressions/pass @@ {"any": []}')
    #
    # Change per-condition, collective expressions bias
    current_defaults.remove("x_policy")  # only governs expressions portion
    assert conds["custom"]["x_policy"] == "filter"
    sig.OnModCommand('update /conditions/default/x_policy first')
    assert reckon(sig.config, data, sig.debug) is False
    assert data_reck == ["<custom", "|>", "<default", "|>"]  # Falls through
    #
    assert not current_defaults
    #
    # "FIRST" (short circuit) hit
    sig.OnModCommand('update /conditions/custom/body $custom')
    assert reckon(sig.config, data, sig.debug) is True
    assert data_reck == ["<custom", "body!>"]
    #
    sig.OnModCommand('update /conditions/onetime @@ {}')
    from textwrap import dedent
    sig.cmd_select("../")
    # Clear module buffer (lots of '/foo =>' output so far)
    assert "Error" not in sig._read()
    #
    # Add another condition that runs ahead of 'custom'
    sig.OnModCommand('select')
    assert sig._read().strip() == dedent("""
        /conditions => {'custom': {...}, 'onetime': {}, 'default': {...}}
    """).strip()
    sig.cmd_update("/conditions/onetime", "custom", arrange=True)
    assert sig._read().strip() == dedent("""
        Selected: /conditions =>
          {'onetime': {...}, 'custom': {...}, ...}
    """).strip()
    assert reckon(sig.config, data, sig.debug) is True
    assert data_reck == ["<onetime", "|>", "<custom", "body!>"]
def scope_conditional_stub(data, scope):
    """Mimic the scope-rejection check from Signal.reckon.

    Return True when *data* should be rejected given the allowed *scope*
    flags ("attached", "detached", "query").
    """
    # NOTE: originally copied verbatim from Signal.reckon; kept in sync by hand.
    cond = {"scope": scope}
    channel = data["channel"]
    detached = data["detached"]
    if not channel:
        # Non-channel (query) traffic is rejected unless "query" is allowed.
        return "query" not in cond["scope"]
    if detached:
        return "detached" not in cond["scope"]
    return "attached" not in cond["scope"]
# XXX this used to be justified when the option was "ignored_scopes" (more
# flags, not so trivial); should just merge with test_reckon or delete
def test_reject_scope():
    """Exercise scope_conditional_stub over channel/attached/detached/query combos."""
    f = scope_conditional_stub
    c = "channel"
    a = "attached"
    d = "detached"
    q = "query"
    #
    # Attached channel: rejected unless "attached" appears in the scope.
    data = {c: True, d: False}
    assert all((f(data, []),
                f(data, [q]),
                f(data, [d]),
                f(data, [d, q])))
    # Fixed precedence trap: `assert not any(...) is True` parsed as
    # `not (any(...) is True)`; state the intent directly instead.
    assert not any((f(data, [a]),
                    f(data, [q, a]),
                    f(data, [q, a, d])))
    #
    # Detached channel: rejected unless "detached" appears in the scope.
    data = {c: True, d: True}
    assert all((f(data, []),
                f(data, [q]),
                f(data, [a]),
                f(data, [a, q])))
    assert not any((f(data, [d]),
                    f(data, [d, a]),
                    f(data, [d, q, a])))
    #
    # Query (no channel): rejected unless "query" appears in the scope.
    data = {c: None, d: None}
    assert f(data, [q]) is False  # pass (not rejected)
    assert all((f(data, []),
                f(data, [d]),
                f(data, [a]),
                f(data, [d, a])))
| poppyschmo/znc-signal | tests/test_hooks.py | test_hooks.py | py | 13,691 | python | en | code | 1 | github-code | 36 |
41756790109 | # Implementation of pseudocode for generating instances for
# Discrete Knapsack Problem (Chapter 2.5)
# Link:
# http://radoslaw.idzikowski.staff.iiar.pwr.wroc.pl/instruction/zto/problemy.pdf
from RandomNumberGenerator import RandomNumberGenerator
if __name__ == "__main__":
    # Step 0, initialization of used variables
    n, Z = 100, 30
    seed_gen = RandomNumberGenerator(Z)
    # Draw a (cost, weight, value) triple for each of the n items; the draw
    # order matters for reproducibility with the seeded generator.
    c_i = []
    w_i = []
    v_i = []
    for i in range(n):
        c_i.append(seed_gen.nextInt(1, 10))
        w_i.append(seed_gen.nextInt(1, 10))
        v_i.append(seed_gen.nextInt(1, 10))
    # Knapsack capacity is drawn last, after all item data.
    B = seed_gen.nextInt(n, 4*n)
    # Print end results
    print(f"c_i: {c_i}")
    print(f"w_i: {w_i}")
    print(f"v_i: {v_i}")
    print(f"B: {B}")
    # Context manager guarantees the file is closed even if a write fails
    # (the original left the handle open on any exception).
    with open(f"Data/dane_DoubleKnapsackProblem_n_{n}_Z_{Z}.dat", "w") as f:
        f.write(f"n = {n};\n")
        f.write(f"c_i = {c_i};\n")
        f.write(f"w_i = {w_i};\n")
        f.write(f"v_i = {v_i};\n")
        f.write(f"B = {B};\n")
| F3mte/L-Zaawansowane-techniki-optymalizacji | DoubleKnapsackProblemGenerator.py | DoubleKnapsackProblemGenerator.py | py | 962 | python | en | code | 0 | github-code | 36 |
35398230138 | from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
import subprocess
from contextlib import closing
from StringIO import StringIO
from twitter.common.collections import maybe_list
from pants.backend.core.tasks.task import Task
from pants.backend.core.tasks.console_task import ConsoleTask
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.target import Target
from pants.goal.context import Context
from pants.goal.goal import Goal
from pants.option.bootstrap_options import register_bootstrap_options
from pants.option.options import Options
from pants_test.base_test import BaseTest
from pants_test.base.context_utils import create_config, create_run_tracker
def is_exe(name):
  """Return True if an executable `name` can be found on the PATH."""
  # Use a with-block so the devnull handle is always closed; the original
  # leaked one open file descriptor per call.
  with open(os.devnull, 'w') as devnull:
    result = subprocess.call(['which', name], stdout=devnull, stderr=subprocess.STDOUT)
  return result == 0
class TaskTest(BaseTest):
  """A baseclass useful for testing Tasks."""
  @classmethod
  def task_type(cls):
    """Subclasses must return the type of the Task subclass under test."""
    raise NotImplementedError()
  def prepare_task(self,
                   config=None,
                   args=None,
                   targets=None,
                   build_graph=None,
                   build_file_parser=None,
                   address_mapper=None,
                   console_outstream=None,
                   workspace=None):
    """Prepares a Task for execution.
    config: An optional string representing the contents of a pants.ini config.
    args: optional list of command line flags, these should be prefixed with '--test-'.
    targets: optional list of Target objects passed on the command line.
    Returns a new Task ready to execute.
    """
    task_type = self.task_type()
    assert issubclass(task_type, Task), 'task_type must be a Task subclass, got %s' % task_type
    config = create_config(config or '')
    # Each task type gets its own isolated workdir under the configured pants_workdir.
    workdir = os.path.join(config.getdefault('pants_workdir'), 'test', task_type.__name__)
    # Parse options for the global ('') and 'test' scopes from the given args.
    new_options = Options(env={}, config=config, known_scopes=['', 'test'], args=args or [])
    # A lot of basic code uses these options, so always register them.
    register_bootstrap_options(new_options.register_global)
    # Register the task's own options under the 'test' scope before instantiation.
    task_type.options_scope = 'test'
    task_type.register_options_on_scope(new_options)
    run_tracker = create_run_tracker()
    # Assemble a Context mirroring what the pants runtime would provide.
    context = Context(config,
                      new_options,
                      run_tracker,
                      targets or [],
                      build_graph=build_graph,
                      build_file_parser=build_file_parser,
                      address_mapper=address_mapper,
                      console_outstream=console_outstream,
                      workspace=workspace)
    return task_type(context, workdir)
  def targets(self, spec):
    """Resolves a target spec to one or more Target objects.
    spec: Either BUILD target address or else a target glob using the siblings ':' or
    descendants '::' suffixes.
    Returns the set of all Targets found.
    """
    spec_parser = CmdLineSpecParser(self.build_root, self.address_mapper)
    addresses = list(spec_parser.parse_addresses(spec))
    # Pull each address's full dependency closure into the build graph first,
    # so get_target below can resolve every address.
    for address in addresses:
      self.build_graph.inject_address_closure(address)
    targets = [self.build_graph.get_target(address) for address in addresses]
    return targets
  def assertDeps(self, target, expected_deps=None):
    """Check that actual and expected dependencies of the given target match.
    :param target: :class:`pants.base.target.Target` to check
    dependencies of.
    :param expected_deps: :class:`pants.base.target.Target` or list of
    ``Target`` instances that are expected dependencies of ``target``.
    """
    # maybe_list normalizes a single Target or a list of Targets into a list.
    expected_deps_list = maybe_list(expected_deps or [], expected_type=Target)
    self.assertEquals(set(expected_deps_list), set(target.dependencies))
class ConsoleTaskTest(TaskTest):
  """A baseclass useful for testing ConsoleTasks."""
  def setUp(self):
    # Reset registered goals so state cannot leak between test cases.
    Goal.clear()
    super(ConsoleTaskTest, self).setUp()
    task_type = self.task_type()
    assert issubclass(task_type, ConsoleTask), \
        'task_type() must return a ConsoleTask subclass, got %s' % task_type
  def execute_task(self, config=None, args=None, targets=None):
    """Creates a new task and executes it with the given config, command line args and targets.
    config: an optional string representing the contents of a pants.ini config.
    args: optional list of command line flags, these should be prefixed with '--test-'.
    targets: optional list of Target objects passed on the command line.
    Returns the text output of the task.
    """
    # Capture everything the task writes to its console stream in memory.
    with closing(StringIO()) as output:
      task = self.prepare_task(config=config,
                               args=args,
                               targets=targets,
                               build_graph=self.build_graph,
                               build_file_parser=self.build_file_parser,
                               address_mapper=self.address_mapper,
                               console_outstream=output)
      task.execute()
      return output.getvalue()
  def execute_console_task(self, config=None, args=None, targets=None, extra_targets=None,
                           workspace=None):
    """Creates a new task and executes it with the given config, command line args and targets.
    config: an optional string representing the contents of a pants.ini config.
    args: optional list of command line flags, these should be prefixed with '--test-'.
    targets: optional list of Target objects passed on the command line.
    extra_targets: optional list of extra targets in the context in addition to those passed on the
                   command line.
    workspace: optional Workspace to pass into the context.
    Returns the list of items returned from invoking the console task's console_output method.
    """
    task = self.prepare_task(config=config,
                             args=args,
                             targets=targets,
                             build_graph=self.build_graph,
                             build_file_parser=self.build_file_parser,
                             address_mapper=self.address_mapper,
                             workspace=workspace)
    # Collect the generator's items directly rather than rendering to a stream.
    return list(task.console_output(list(task.context.targets()) + list(extra_targets or ())))
  def assert_entries(self, sep, *output, **kwargs):
    """Verifies the expected output text is flushed by the console task under test.
    NB: order of entries is not tested, just presence.
    sep: the expected output separator.
    *output: the output entries expected between the separators
    **kwargs: additional kwargs passed to execute_task.
    """
    # We expect each output line to be suffixed with the separator, so for , and [1,2,3] we expect:
    # '1,2,3,' - splitting this by the separator we should get ['1', '2', '3', ''] - always an extra
    # empty string if the separator is properly always a suffix and not applied just between
    # entries.
    self.assertEqual(sorted(list(output) + ['']), sorted((self.execute_task(**kwargs)).split(sep)))
  def assert_console_output(self, *output, **kwargs):
    """Verifies the expected output entries are emitted by the console task under test.
    NB: order of entries is not tested, just presence.
    *output: the expected output entries
    **kwargs: additional kwargs passed to execute_console_task.
    """
    self.assertEqual(sorted(output), sorted(self.execute_console_task(**kwargs)))
  def assert_console_output_ordered(self, *output, **kwargs):
    """Verifies the expected output entries are emitted by the console task under test.
    NB: order of entries is tested.
    *output: the expected output entries in expected order
    **kwargs: additional kwargs passed to execute_console_task.
    """
    self.assertEqual(list(output), self.execute_console_task(**kwargs))
  def assert_console_raises(self, exception, **kwargs):
    """Verifies the expected exception is raised by the console task under test.
    **kwargs: additional kwargs are passed to execute_console_task.
    """
    with self.assertRaises(exception):
      self.execute_console_task(**kwargs)
| fakeNetflix/square-repo-pants | tests/python/pants_test/tasks/test_base.py | test_base.py | py | 8,440 | python | en | code | 0 | github-code | 36 |
# Read the grid size (n, m) and circle count k, then each circle's centre
# (x, y) and radius r.  Every circle is stored as its covering interval on
# the x axis paired with its covering interval on the y axis.
n, m, k = map(int, input().split())
intervals = []
for _ in range(k):
    x, y, r = map(int, input().split())
    intervals.append(([x - r, x + r], [y - r, y + r]))
# Maximum number of x-intervals covering any single column 0..n-1, and of
# y-intervals covering any single row 0..m-1.
best_x = max((sum(1 for xs, _ in intervals if xs[0] <= col <= xs[1])
              for col in range(n)), default=0)
best_y = max((sum(1 for _, ys in intervals if ys[0] <= row <= ys[1])
              for row in range(m)), default=0)
print(max(best_x, best_y))
| naphattar/Betaprogramming | Chapter 1/1042.py | 1042.py | py | 485 | python | en | code | 0 | github-code | 36 |
17952105925 | """ This file defines a mesh as a tuple of (vertices, triangles)
All operations are based on numpy ndarray
- vertices: np ndarray of shape (n, 3) np.float32
- triangles: np ndarray of shape (n_, 3) np.uint32
"""
import numpy as np
def box_trimesh(
        size,             # float [3] x, y, z edge lengths (in meter) in the box frame
        center_position,  # float [3] box center position (in meter) in the world frame
        rpy=np.zeros(3),  # euler angles (in rad); only the zero rotation is supported
    ):
    """ Return an axis-aligned box mesh as a (vertices, triangles) pair. """
    if not (rpy == 0).all():
        raise NotImplementedError("Only axis-aligned box triangle mesh is implemented")

    # Start every corner at the center, then push it out by half an edge
    # length along each axis; each index group names the corners on one side.
    vertices = np.empty((8, 3), dtype=np.float32)
    vertices[:] = center_position
    side_ids = (
        ([0, 4, 2, 6], [1, 5, 3, 7]),  # x: negative side, positive side
        ([0, 1, 2, 3], [4, 5, 6, 7]),  # y
        ([2, 3, 6, 7], [0, 1, 4, 5]),  # z
    )
    for axis, (neg_ids, pos_ids) in enumerate(side_ids):
        vertices[neg_ids, axis] -= size[axis] / 2
        vertices[pos_ids, axis] += size[axis] / 2

    # Two outward-facing triangles per box face, 12 in total.
    triangles = np.array([
        [0, 2, 1], [1, 2, 3],
        [0, 4, 2], [2, 4, 6],
        [4, 5, 6], [5, 7, 6],
        [1, 3, 5], [3, 7, 5],
        [0, 1, 4], [1, 5, 4],
        [2, 6, 3], [3, 6, 7],
    ], dtype=np.uint32)

    return vertices, triangles
def combine_trimeshes(*trimeshes):
    """ Merge several (vertices, triangles) meshes into one.

    Triangle indices of the second mesh are offset by the first mesh's
    vertex count so they keep addressing their own vertices after the
    vertex arrays are concatenated.
    """
    if len(trimeshes) > 2:
        # Bug fix: the tail must be unpacked into separate arguments; the
        # original passed the tuple itself as one (invalid) trimesh, which
        # crashed for any call with more than two meshes.
        return combine_trimeshes(
            trimeshes[0],
            combine_trimeshes(*trimeshes[1:])
        )

    # only two trimeshes to combine
    trimesh_0, trimesh_1 = trimeshes
    # Keep the mesh with more triangles first so the smaller index block is
    # the one that gets re-offset.
    if trimesh_0[1].shape[0] < trimesh_1[1].shape[0]:
        trimesh_0, trimesh_1 = trimesh_1, trimesh_0

    trimesh_1 = (trimesh_1[0], trimesh_1[1] + trimesh_0[0].shape[0])
    vertices = np.concatenate((trimesh_0[0], trimesh_1[0]), axis=0)
    triangles = np.concatenate((trimesh_0[1], trimesh_1[1]), axis=0)
    return vertices, triangles
def move_trimesh(trimesh, move: np.ndarray):
    """ Translate every vertex of the mesh by *move* (inplace operation).

    Bug fix: ``trimesh[0] += move`` performs the in-place add and then
    attempts ``trimesh[0] = result``, which raises TypeError when *trimesh*
    is a tuple (exactly what box_trimesh returns).  Binding the vertex
    array to a local first keeps the in-place mutation and works for both
    tuples and lists.
    """
    vertices = trimesh[0]
    vertices += move
| ZiwenZhuang/parkour | legged_gym/legged_gym/utils/trimesh.py | trimesh.py | py | 2,093 | python | en | code | 301 | github-code | 36 |
23049817329 | ##Remove tax from a payment - start with the gross
def onePercent(taxValue, num):
    """Return the value of one percent of the gross-inclusive scale.

    *num* is a gross amount representing (100 + taxValue) percent; dividing
    by that percentage yields the size of a single percent.
    """
    gross_percent = 100 + float(taxValue)
    return num / gross_percent
def desiredPercentage(onePC, desired):
    """Scale a one-percent slice *onePC* up to *desired* percent."""
    return onePC * desired
def calcTax(tax, num, desired):
    """Strip VAT from gross amount *num*: return *desired* percent of the net."""
    return desiredPercentage(onePercent(tax, num), desired)
def main():
    '''Demo: strip 20% VAT from a few gross amounts and print the results.'''
    numbers = [2, 3, 4, 5]
    taxRemoved = [calcTax(20, num, 100) for num in numbers]
    rounded = [round(net, 3) for net in taxRemoved]
    print(taxRemoved)
    print(rounded)
print(rounded)
if __name__ == "__main__":
main()
| idwesar/vat-calculator | reverse_vat_calc.py | reverse_vat_calc.py | py | 701 | python | en | code | 1 | github-code | 36 |
11045288759 | from datetime import datetime
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import action
from rest_framework.viewsets import GenericViewSet, ModelViewSet
from rest_framework.mixins import (
RetrieveModelMixin, ListModelMixin, CreateModelMixin,
UpdateModelMixin, DestroyModelMixin
)
from .models import TodoList, TodoListItem, TodoListItemTimeTrack
from .serializers import TodoListSerializer, TodoListItemSerializer, TodoListItemTimeTrackSerializer
class TodoListViewSet(GenericViewSet, RetrieveModelMixin, ListModelMixin):
    """Read-only endpoints for the current user's todo lists, keyed by date."""
    serializer_class = TodoListSerializer
    permission_classes = (IsAuthenticated,)
    lookup_field = 'date'
    def get_queryset(self):
        """Return the user's lists, optionally bounded by ?date_from / ?date_to."""
        date_from = self.request.query_params.get('date_from')
        date_to = self.request.query_params.get('date_to')
        queryset = TodoList.objects.filter(user=self.request.user)
        # Bug fix: QuerySet.filter() returns a NEW queryset, so the result
        # must be reassigned -- the original discarded both date filters.
        if date_from:
            queryset = queryset.filter(date__gte=date_from)
        if date_to:
            queryset = queryset.filter(date__lte=date_to)
        return queryset
    def get_object(self):
        """Resolve the list for the URL date, defaulting to today on bad input."""
        date = self.kwargs.get('date')
        try:
            date = datetime.strptime(date, "%Y-%m-%d").date()
        except (TypeError, ValueError):
            # Missing or malformed date falls back to today's list.
            date = datetime.now().date()
        return TodoList.objects.get_todo_list(self.request.user, date)
class TodoListItemViewSet(ModelViewSet):
    """CRUD plus time-tracking actions for the items of one day's todo list.

    The list's date arrives via the URL kwarg ``todo_list_date`` captured in
    ``dispatch`` before DRF routing.
    """
    queryset = TodoListItem.objects.all()
    serializer_class = TodoListItemSerializer
    permission_classes = (IsAuthenticated,)
    def dispatch(self, request, *args, **kwargs):
        # Pop the date kwarg so it never reaches the underlying view methods
        # as an unexpected argument.
        self.todo_list_date = kwargs.pop('todo_list_date', None)
        return super().dispatch(request, *args, **kwargs)
    def get_todo_list(self):
        # Parse the URL date; fall back to today's list on a malformed value.
        try:
            date = datetime.strptime(self.todo_list_date, "%Y-%m-%d").date()
        except ValueError as ex:
            date = datetime.now().date()
        return TodoList.objects.get_todo_list(self.request.user, date)
    def get_queryset(self):
        # Restrict items to the resolved list owned by the requesting user.
        user = self.request.user
        todo_list = self.get_todo_list()
        return super().get_queryset().filter(todo_list=todo_list,
                                             todo_list__user=user)
    def perform_create(self, serializer):
        # New items are always attached to the list resolved from the URL date.
        todo_list = self.get_todo_list()
        serializer.save(todo_list=todo_list)
    @action(detail=True, url_path='start')
    def start_time_track(self, request, pk=None):
        # Begin time tracking on the item.
        todo_list_item = self.get_object()
        todo_list_item.start_task()
        return Response(status=status.HTTP_200_OK)
    @action(detail=True, url_path='end')
    def end_time_track(self, request, pk=None):
        # Stop time tracking on the item.
        todo_list_item = self.get_object()
        todo_list_item.finish_task()
        return Response(status=status.HTTP_200_OK)
    @action(detail=True, url_path='done')
    def mark_item_done(self, request, pk=None):
        # Mark the item as completed.
        todo_list_item = self.get_object()
        todo_list_item.done_task()
        return Response(status=status.HTTP_200_OK)
    @action(detail=True, url_path='undone')
    def mark_item_undone(self, request, pk=None):
        # Revert the item to the not-completed state.
        todo_list_item = self.get_object()
        todo_list_item.undone_task()
        return Response(status=status.HTTP_200_OK)
    @action(detail=True, url_path='play-pause')
    def toggle_start_stop(self, request, pk=None):
        # Toggle between tracking and paused states.
        todo_list_item = self.get_object()
        todo_list_item.toggle_start_stop()
        return Response(status=status.HTTP_200_OK)
    @action(detail=True, url_path='delete')
    def delete_task(self, request, pk=None):
        # Hard-delete the item.
        todo_list_item = self.get_object()
        todo_list_item.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
| mohsen-hassani-org/teamche | todo_list/api.py | api.py | py | 3,846 | python | en | code | 0 | github-code | 36 |
72973366503 | # coded by h4sski
'''
https://adriann.github.io/programming_problems.html
Write three functions that compute the sum of the numbers
in a list: using a for-loop, a while-loop and recursion.
(Subject to availability of these constructs in your
language of choice.)
'''
list_input = [1, 2, 3, 4, 5, 6, 7]
def for_loop(list):
    """Sum the numbers in *list* using a for-loop."""
    total = 0
    for value in list:
        total += value
    return total
def while_loop(list):
    """Sum the numbers in *list* using a while-loop."""
    total = 0
    index = 0
    length = len(list)
    while index < length:
        total += list[index]
        index += 1
    return total
def recursion_loop(list, step, sum):
    """Sum the numbers in *list* from index *step* onward, added to *sum*.

    Tail-recursive with *sum* as the running accumulator.  Bug fix: the
    original both threaded the accumulator into every recursive call AND
    re-added it on the way back up, double-counting any non-zero starting
    value; with the documented starting value of 0 the result is unchanged.
    """
    if step < len(list):
        return recursion_loop(list, step + 1, sum + list[step])
    return sum
def main(list):
    """Print the sum of *list* as computed by each of the three strategies."""
    results = (
        ('for loop\t\t', for_loop(list)),
        ('while loop\t\t', while_loop(list)),
        ('recursion loop\t\t', recursion_loop(list, 0, 0)),
    )
    for label, total in results:
        print(label, total)
if __name__ == '__main__':
main(list_input) | h4sski-programming/Python | py2220.py | py2220.py | py | 866 | python | en | code | 0 | github-code | 36 |
24214471653 | from lib.api_lib import *
faker = Factory.create()
# Randomised billing-address fields for the order fixture.
BILLING_FIRST_NAME = faker.firstName()
BILLING_LAST_NAME = faker.lastName()
BILLING_COMPANY = faker.company()
BILLING_STREET_ADD1 = faker.buildingNumber().lstrip("0")
BILLING_STREET_ADD2 = faker.streetName()
BILLING_CITY = faker.city()
BILLING_PHONE = faker.phoneNumber()
# Fixed NSW locality so shipping/tax rules behave deterministically.
BILLING_STATE = 'New South Wales'
BILLING_POSTCODE = '2000'
EMAIL = faker.email()
# Python 2 str.translate: the second argument lists characters to DELETE,
# stripping punctuation that would make the generated email invalid.
EMAIL = EMAIL.translate(None,",!;#'?$%^&*()-~")
# Randomised shipping-address fields for the order fixture.
SHIPPING_FIRST_NAME = faker.firstName()
SHIPPING_LAST_NAME = faker.lastName()
SHIPPING_COMPANY = faker.company()
SHIPPING_STREET_ADD1 = faker.buildingNumber().lstrip("0")
SHIPPING_STREET_ADD2 = faker.streetName()
SHIPPING_CITY = faker.city()
SHIPPING_PHONE = faker.phoneNumber()
SHIPPING_STATE = 'New South Wales'
SHIPPING_POSTCODE = '2222'
# Coupon fixture data; random strings keep repeated test runs independent.
COUPON_NAME = generate_random_string()
COUPON_CODE = generate_random_string()
COUPON_TYPE = "per_item_discount"
MANUAL_PAYMENT_NAME = faker.name()
# Request body for POST /orders: a guest order (customer_id 0) with two
# products shipped to a single Australian address.
post_order_payload = {
    'customer_id': 0,  # 0 = guest checkout
    'date_created': "Thu, 04 Oct 2012 03:24:40 +0000",
    'base_shipping_cost': "0.0000",
    'shipping_cost_ex_tax': "0.0000",
    'shipping_cost_inc_tax': "0.0000",
    'base_handling_cost': "0.0000",
    'handling_cost_ex_tax': "0.0000",
    'handling_cost_inc_tax': "0.0000",
    'base_wrapping_cost': "0.0000",
    'wrapping_cost_ex_tax': "0.0000",
    'wrapping_cost_inc_tax': "0.0000",
    'items_shipped': 0,
    'refunded_amount': "0.0000",
    'staff_notes': "",
    'customer_message': "",
    'discount_amount': 5,
    'billing_address': {
        'first_name': BILLING_FIRST_NAME,
        'last_name': BILLING_LAST_NAME,
        'company': BILLING_COMPANY,
        'street_1': BILLING_STREET_ADD1,
        'street_2': BILLING_STREET_ADD2,
        'city': BILLING_CITY,
        'state': BILLING_STATE,
        'zip': BILLING_POSTCODE,
        'country': "Australia",
        'country_iso2': "AU",
        'phone': BILLING_PHONE,
        'email': EMAIL
    },
    # The API accepts multiple shipping addresses; this fixture uses one.
    'shipping_addresses': [{
        'first_name': SHIPPING_FIRST_NAME,
        'last_name': SHIPPING_LAST_NAME,
        'company': SHIPPING_COMPANY,
        'street_1': SHIPPING_STREET_ADD1,
        'street_2': SHIPPING_STREET_ADD2,
        'city': SHIPPING_CITY,
        'state': SHIPPING_STATE,
        'zip': SHIPPING_POSTCODE,
        'country': "Australia",
        'country_iso2': "AU",
        'phone': SHIPPING_PHONE,
        'email': EMAIL
    }],
    # Two existing catalog products, one unit each at $10.
    'products': [{
        'product_id': 75,
        'quantity': 1,
        'price_inc_tax': 10,
        'price_ex_tax': 10
    }, {
        'product_id': 74,
        'quantity': 1,
        'price_inc_tax': 10,
        'price_ex_tax': 10
    }]
}
# Request body for POST /coupons: an enabled per-item discount applicable
# to all categories (ids [0]) and restricted to Australian orders.
post_coupon_payload = {
    "name": COUPON_NAME,
    "code": COUPON_CODE,
    "type": COUPON_TYPE,
    "amount": 65,
    "min_purchase": 0,
    "enabled": True,
    "applies_to": {
        "entity": "categories",
        "ids": [0]  # 0 = all categories
    },
    "max_uses": 100,
    "max_uses_per_customer": 1,
    "restricted_to": {"countries":["AU"]}
}
| testing-sravan/tests-scripts-worked | Regression_suite_bigc/fixtures/order_coupons.py | order_coupons.py | py | 3,006 | python | en | code | 0 | github-code | 36 |
25404288175 | # -*- coding: utf-8 -*-
import torch.nn as nn
from network import Decomposition,MultiscaleDiscriminator,downsample
from utils import gradient
from ssim import SSIM
import torch
import torch.optim as optim
import torchvision
import os
import torch.nn.functional as F
from contiguous_params import ContiguousParams
class Model(nn.Module):
    """Training wrapper pairing the Decomposition generator with a multiscale
    discriminator: builds the optimizers and losses, runs the alternating
    G/D update step, and handles checkpoint save/load and image dumps."""
    def __init__(self, args):
        super(Model, self).__init__()
        self.fusion = Decomposition()
        self.D = MultiscaleDiscriminator(input_nc=1)
        self.MSE_fun = nn.MSELoss()
        self.L1_loss = nn.L1Loss()
        self.SSIM_fun = SSIM()
        if args.contiguousparams == True:
            # ContiguousParams packs all parameters into one flat buffer for
            # faster optimizer steps.
            print("ContiguousParams---")
            parametersF = ContiguousParams(self.fusion.parameters())
            parametersD = ContiguousParams(self.D.parameters())
            self.optimizer_G = optim.Adam(parametersF.contiguous(), lr=args.lr)
            self.optimizer_D = optim.Adam(parametersD.contiguous(), lr=args.lr)
        else:
            self.optimizer_G = optim.Adam(self.fusion.parameters(), lr=args.lr)
            self.optimizer_D = optim.Adam(self.D.parameters(), lr=args.lr)
        self.g1 = self.g2 = self.g3 = self.s = self.img_re = None
        self.loss = torch.zeros(1)
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer_G, mode='min', factor=0.5, patience=2,
                                                                    verbose=False, threshold=0.0001, threshold_mode='rel',
                                                                    cooldown=0, min_lr=0, eps=1e-10)
        self.min_loss = 1000  # best (lowest) loss seen so far, used for checkpointing
        self.args = args
        self.downsample = downsample()
        self.criterionGAN = torch.nn.MSELoss()  # LSGAN objective
        if args.multiGPU:
            self.mulgpus()
        self.load()
        self.load_D()
    def load_D(self):
        """Restore discriminator weights from the checkpoint next to the fusion weights."""
        if self.args.load_pt:
            print("=========LOAD WEIGHTS D=========")
            path = self.args.weights_path.replace("fusion", "D")
            print(path)
            checkpoint = torch.load(path)
            if self.args.multiGPU:
                print("load D")
                self.D.load_state_dict(checkpoint['weight'])
            else:
                print("load D single")
                # Load a multi-GPU (DataParallel) checkpoint into a single-GPU model.
                state_dict = checkpoint['weight']
                # create new OrderedDict that does not contain `module.`
                from collections import OrderedDict
                new_state_dict = OrderedDict()
                for k, v in state_dict.items():
                    name = k.replace('module.', '')  # remove `module.`
                    new_state_dict[name] = v
                # load params
                self.D.load_state_dict(new_state_dict)
            print("=========END LOAD WEIGHTS D=========")
    def load(self):
        """Restore generator weights (full or partial) and recover the starting epoch."""
        start_epoch = 0
        if self.args.load_pt:
            print("=========LOAD WEIGHTS=========")
            checkpoint = torch.load(self.args.weights_path)
            start_epoch = checkpoint['epoch'] + 1
            try:
                if self.args.multiGPU:
                    print("load G")
                    self.fusion.load_state_dict(checkpoint['weight'])
                else:
                    print("load G single")
                    # Load a multi-GPU (DataParallel) checkpoint into a single-GPU model.
                    state_dict = checkpoint['weight']
                    # create new OrderedDict that does not contain `module.`
                    from collections import OrderedDict
                    new_state_dict = OrderedDict()
                    for k, v in state_dict.items():
                        name = k.replace('module.', '')  # remove `module.`
                        new_state_dict[name] = v
                    # load params
                    self.fusion.load_state_dict(new_state_dict)
            except Exception:
                model = self.fusion
                print("weights not same ,try to load part of them")
                model_dict = model.state_dict()
                pretrained = torch.load(self.args.weights_path)['weight']
                # 1. filter out unnecessary keys.
                # Bug fix: take the CHECKPOINT's values for the keys both dicts
                # share -- the original iterated model_dict.items(), copying the
                # model's own values and making the partial load a no-op.
                pretrained_dict = {k: v for k, v in pretrained.items() if k in model_dict}
                left_dict = {k for k in model_dict if k not in pretrained}
                print(left_dict)
                # 2. overwrite entries in the existing state dict
                model_dict.update(pretrained_dict)
                # 3. load the new state dict
                model.load_state_dict(model_dict)
                print(len(model_dict), len(pretrained_dict))
            print("start_epoch:", start_epoch)
            print("=========END LOAD WEIGHTS=========")
        print("========START EPOCH: %d=========" % start_epoch)
        self.start_epoch = start_epoch
    def mulGANloss(self, input_, is_real):
        """LSGAN loss summed over the multiscale discriminator outputs."""
        if is_real:
            label = 1
        else:
            label = 0
        if isinstance(input_[0], list):
            # Multiscale output: one list of feature maps per scale; score the
            # final map of each scale.
            loss = 0.0
            for i in input_:
                pred = i[-1]
                target = torch.Tensor(pred.size()).fill_(label).to(pred.device)
                loss += self.criterionGAN(pred, target)
            return loss
        else:
            target = torch.Tensor(input_[-1].size()).fill_(label).to(input_[-1].device)
            return self.criterionGAN(input_[-1], target)
    def forward(self, isTest=False):
        """Run the decomposition network on the stored input batch."""
        self.g1, self.g2, self.g3, self.s, self.img_re = self.fusion(self.img, isTest)
    def set_requires_grad(self, net, requires_grad=False):
        """Enable or disable gradient computation for all parameters of *net*."""
        for param in net.parameters():
            param.requires_grad = requires_grad
    def backward_G(self):
        """Generator losses: per-branch gradient MSE, SSIM, pixel MSE and GAN term."""
        img = self.img
        img_re = self.img_re
        img_g = gradient(img)
        self.img_down = self.downsample(img)
        self.img_g = img_g
        # Each decomposition branch should reproduce the input's gradient map.
        g1 = self.MSE_fun(self.g1, img_g)
        g2 = self.MSE_fun(self.g2, img_g)
        g3 = self.MSE_fun(self.g3, img_g)
        grd_loss = g1 + g2 + g3
        self.lossg1, self.lossg2, self.lossg3 = g1, g2, g3
        ssim_loss = 1 - self.SSIM_fun(img_re, img)
        ssim_loss = ssim_loss * 10
        pixel_loss = self.MSE_fun(img_re, img)
        pixel_loss = pixel_loss * 100
        loss_G = self.mulGANloss(self.D(self.s), is_real=True) * 0.1
        # Sum the losses and backpropagate.
        loss = pixel_loss + ssim_loss + grd_loss + loss_G
        loss.backward()
        self.loss, self.pixel_loss, self.ssim_loss, self.grd_loss = loss, pixel_loss, ssim_loss, grd_loss
        self.loss_G = loss_G
    def backward_D(self):
        """Discriminator loss: real downsampled input vs. detached generator output."""
        # Real
        pred_real = self.D(self.img_down.detach())
        loss_D_real = self.mulGANloss(pred_real, is_real=True)
        # Fake
        pred_fake = self.D(self.s.detach())
        loss_D_fake = self.mulGANloss(pred_fake, is_real=False)
        # Combined loss and calculate gradients
        loss_D = (loss_D_real + loss_D_fake) * 0.5
        loss_D.backward()
        self.loss_D = loss_D
        self.loss_D_real, self.loss_D_fake = loss_D_real, loss_D_fake
    def mulgpus(self):
        """Wrap both networks in DataParallel across the configured GPUs."""
        self.fusion = nn.DataParallel(self.fusion.cuda(), device_ids=self.args.GPUs, output_device=self.args.GPUs[0])
        self.D = nn.DataParallel(self.D.cuda(), device_ids=self.args.GPUs, output_device=self.args.GPUs[0])
    def setdata(self, img):
        """Move the input batch to the configured device and stash it for step()."""
        img = img.to(self.args.device)
        self.img = img
    def step(self):
        """One optimization step: update G (with D frozen), then update D."""
        self.optimizer_G.zero_grad()  # set G gradients to zero
        self.forward()
        self.set_requires_grad(self.D, False)  # D requires no gradients when optimizing G
        self.backward_G()  # calculate gradients for G
        self.optimizer_G.step()  # update G weights
        self.set_requires_grad(self.D, True)
        self.optimizer_D.zero_grad()  # set D gradients to zero
        self.backward_D()  # calculate gradients for D
        self.optimizer_D.step()  # update D weights
        # One-line progress summary consumed by the training loop.
        self.print = 'ALL[%.5lf] pixel[%.5lf] grd[%.5lf](%.5lf %.5lf %.5lf) ssim[%.5lf] G[%.5lf] D[%.5lf][%.5lf %.5lf ]' %\
                     (self.loss.item(), self.pixel_loss.item(), self.grd_loss.item(), self.lossg1.item(), self.lossg2.item(), self.lossg3.item(), self.ssim_loss.item(),
                      self.loss_G.item(), self.loss_D.item(), self.loss_D_real.item(), self.loss_D_fake.item(),)
    def saveimg(self, epoch, num=0):
        """Dump a grid of input/reconstruction/decomposition images for *epoch*."""
        img = torchvision.utils.make_grid(
            [self.img[0].cpu(), self.img_re[0].cpu(), self.img_down[0].cpu(), self.img_g[0].cpu(), self.s[0].cpu(), self.g1[0].cpu(), self.g2[0].cpu(),
             self.g3[0].cpu(), (self.g1 + self.g2 + self.g3)[0].cpu()], nrow=5)
        torchvision.utils.save_image(img, fp=(os.path.join('output/result_' + str(epoch) + '.jpg')))
    def saveimgdemo(self):
        """Dump the same diagnostic grid for a single demo run."""
        self.img_down = self.downsample(self.img)
        self.img_g = gradient(self.img)
        img = torchvision.utils.make_grid(
            [self.img[0].cpu(), self.img_re[0].cpu(), self.img_down[0].cpu(), self.img_g[0].cpu(), self.s[0].cpu(), self.g1[0].cpu(), self.g2[0].cpu(),
             self.g3[0].cpu(), (self.g1 + self.g2 + self.g3)[0].cpu()], nrow=5)
        torchvision.utils.save_image(img, fp=(os.path.join('demo_result.jpg')))
    def saveimgfuse(self, name=''):
        """Save input / gradient / boosted fused-gradient triple next to *name*."""
        self.img_down = self.downsample(self.img)
        self.img_g = gradient(self.img)
        img = torchvision.utils.make_grid(
            [self.img[0].cpu(), self.img_g[0].cpu(), ((self.g1 + self.g2 + self.g3) * 1.5)[0].cpu()], nrow=3)
        torchvision.utils.save_image(img, fp=(os.path.join(name.replace('Test', 'demo'))))
    def save(self, epoch):
        """Persist checkpoints: track the best-loss model and snapshot every epoch."""
        ## Save the current model and keep a separate copy of the best one.
        if self.min_loss > self.loss.item():
            self.min_loss = self.loss.item()
            torch.save({'weight': self.fusion.state_dict(), 'epoch': epoch, }, os.path.join('weights/best_fusion.pt'))
            torch.save({'weight': self.D.state_dict(), 'epoch': epoch, }, os.path.join('weights/best_D.pt'))
            print('[%d] - Best model is saved -' % (epoch))
        if epoch % 1 == 0:
            torch.save({'weight': self.fusion.state_dict(), 'epoch': epoch, }, os.path.join('weights/epoch' + str(epoch) + '_fusion.pt'))
            torch.save({'weight': self.D.state_dict(), 'epoch': epoch, }, os.path.join('weights/epoch' + str(epoch) + '_D.pt'))
    def getimg(self):
        """Return the three gradient branches and the structure map."""
        return self.g1, self.g2, self.g3, self.s
| thfylsty/ImageFusion_DeepDecFusion | model.py | model.py | py | 11,325 | python | en | code | 5 | github-code | 36 |
73225167144 | from extra_streamlit_tools._logging import logging as logger
import streamlit as st
from typing import Any, Optional
def clear_cache(keep_keys: Optional[list[str]] = None) -> None:
    """
    Resets the Streamlit cache.

    Parameters
    ----------
    keep_keys:Optional[list[str]]
        Keys to not be cleared from cache
    """
    logger.debug("Clearing cache")
    # Bug fix: snapshot the keys first -- deleting entries while iterating
    # the live session_state view mutates the mapping mid-iteration.
    for key in list(st.session_state.keys()):
        if keep_keys is None or key not in keep_keys:
            logger.debug(f"Deleting key: {key}")
            del st.session_state[key]
        else:
            logger.debug(f"Keeping key: {key}")
def init_session_keys(key_value_pairs: dict[str, Any]) -> None:
    """
    Seed Streamlit's session state: every key from *key_value_pairs* that is
    not already present is created with its value; existing keys are left
    untouched.

    Parameters
    ----------
    key_value_pairs:dict[str, Any]
        A dictionary of key/value pairs to seed the session state with
    """  # noqa
    for key in key_value_pairs:
        if key in st.session_state:
            continue
        st.session_state[key] = key_value_pairs[key]
def change_in_session_state(key_value_pairs: dict[str, Any]):
    """
    Overwrite entries in Streamlit's session state with the given values.

    Parameters
    ----------
    key_value_pairs:dict[str, Any]
        Mapping of session_state keys to the new values they should hold
    """  # noqa
    for key in key_value_pairs:
        st.session_state[key] = key_value_pairs[key]
def set_selectbox_index(
    selectbox_key: str, session_state_var_name: str, values: list[Any]
) -> None:
    """
    Store, under *session_state_var_name*, the position that the selectbox's
    current value occupies in *values*.  Useful for keeping one selectbox's
    default in sync with another whose value is only known at runtime.

    Parameters
    ----------
    selectbox_key:str
        Session-state key of the selectbox widget
    session_state_var_name:str
        Session-state key that receives the computed index
    values:list[Any]
        The list of options shown by the selectbox
    """  # noqa
    current_choice = st.session_state[selectbox_key]
    st.session_state[session_state_var_name] = values.index(current_choice)
| sTomerG/extra-streamlit-tools | src/extra_streamlit_tools/utils.py | utils.py | py | 2,324 | python | en | code | 0 | github-code | 36 |
29945890561 | from enaml.core.enaml_compiler import EnamlCompiler
from enaml.core.parser import parse
def compile_source(source, item, filename="<test>", namespace=None):
"""Compile Enaml source code and return the target item.
Parameters
----------
source : str
The Enaml source code string to compile.
item : str
The name of the item in the resulting namespace to return.
filename : str, optional
The filename to use when compiling the code. The default
is '<test>'.
namespace : dict
Namespace in which to execute the code
Returns
-------
result : object
The named object from the resulting namespace.
"""
ast = parse(source, filename)
code = EnamlCompiler.compile(ast, filename)
namespace = namespace or {}
exec(code, namespace)
return namespace[item]
| codelv/enaml-web | tests/utils.py | utils.py | py | 855 | python | en | code | 99 | github-code | 36 |
36812069772 | # Code by @AmirMotefaker
# projecteuler.net
# https://projecteuler.net/problem=25
# 1000-digit Fibonacci number
# Problem 25
# The Fibonacci sequence is defined by the recurrence relation:
# Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1.
# Hence the first 12 terms will be:
# F1 = 1
# F2 = 1
# F3 = 2
# F4 = 3
# F5 = 5
# F6 = 8
# F7 = 13
# F8 = 21
# F9 = 34
# F10 = 55
# F11 = 89
# F12 = 144
# The 12th term, F12, is the first term to contain three digits.
# What is the index of the first term in the Fibonacci sequence to contain 1000 digits?
# Solution 1
# def fibonacci(a, b, n):
# if n == 1:
# return a
# else:
# return fibonacci(a+b, a, n-1)
# print (fibonacci(1, 0, 12))
# Solution 2
# loop instead of recursion
# def fibonacci(n):
# a = 1
# b = 0
# while n > 1:
# a, b = a+b, a
# n = n - 1
# return a
# print (fibonacci(12))
# Solution 3
import time
start_time = time.time() #Time at the start of program execution
term = 2
fib = [1, 1]
while len(str(fib[1])) < 1000:
term += 1
fib = [fib[1], fib[0] + fib[1]]
print (term)
end_time = time.time() #Time at the end of execution
print ("Time of program execution:", (end_time - start_time)) # Time of program execution
### Answer: 4782
| AmirMotefaker/ProjectEuler | Problem25.py | Problem25.py | py | 1,293 | python | en | code | 1 | github-code | 36 |
8890345896 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 20 15:50:56 2018
@author: vitorhadad
"""
#
import numpy as np
import networkx as nx
import os
from tqdm import trange
from matching.solver.kidney_solver2 import optimal, greedy, get_two_cycles
from matching.utils.data_utils import clock_seed, evaluate_policy, get_n_matched, get_cycle_probabilities
from matching.utils.env_utils import snapshot, two_cycles
from matching.policy_function.policy_function_lstm import RNN
from matching.policy_function.policy_function_mlp import MLPNet
from matching.policy_function.policy_function_gcn import GCNet
from matching.policy_function.policy_function_rgcn import RGCNet
from matching.policy_function.policy_function_attention import AttentionRNN
from matching.environment.abo_environment import ABOKidneyExchange
#%%
def run_node2vec(G,
path = "",
input = "edges.txt",
output = "emb.txt",
d = 10):
nx.write_edgelist(G, path + input, data = False)
cmd = "./node2vec -i:{}{} -o:{}{} -d:{} -dr"\
.format(path, input, path, output, d)
os.system(cmd)
with open("emb.txt", "r") as emb:
lines = emb.readlines()
n = len(lines) - 1
features = np.zeros(shape = (n, d))
for k, line in enumerate(lines[1:]):
_, *xs = line.split(" ")
try:
features[k] = [float(x) for x in xs]
except:
import pdb; pdb.set_trace()
return features
def get_features(env):
opt= optimal(env)
features = []
labels = []
for t in range(env.time_length):
liv = np.array(env.get_living(t))
A = env.A(t)
has_cycle = np.diag(A @ A) > 0
liv = liv[has_cycle]
m = opt["matched"][t]
Y = np.zeros(len(liv))
Y[np.isin(liv, list(m))] = 1
labels.append(Y)
if len(liv) > 0:
X = env.X(t)[has_cycle]
subg = env.subgraph(liv)
E = run_node2vec(subg)
features.append(np.hstack([X, E]))
env.removed_container[t].update()
return np.vstack(features), np.hstack(labels)
env = ABOKidneyExchange(entry_rate=5, death_rate=.1, time_length=10, seed=clock_seed())
X, Y = get_features(env)
np.save("X.npy", X)
np.save("Y.npy", Y)
#%%
#%%
#
#
#%%
#
#env = ABOKidneyExchange(entry_rate = 5,
# death_rate = .1,
# time_length = 1500,
# seed = clock_seed())
#
#opt = optimal(env)
#gre = greedy(env)
#
##%%
#def evaluate(algo, env, thres):
#
# env.removed_container.clear()
# rewards = []
# for t in trange(env.time_length):
#
# liv = np.array(env.get_living(t))
# A = env.A(t)
# has_cycle = np.diag(A @ A) > 0
# liv_and_cycle = liv[has_cycle]
# yhat_full = np.zeros(len(liv), dtype=bool)
#
# if len(liv_and_cycle) == 0:
# continue
#
# X = env.X(t)[has_cycle]
# subg = env.subgraph(liv_and_cycle)
# E = run_node2vec(subg)
# F = np.hstack([X, E])
#
# yhat = algo.predict_proba(F)[:,1] > thres
# yhat_full[has_cycle] = yhat
# potential = liv[yhat_full]
#
# removed = optimal(env, t, t, subset=potential)["matched"][t]
# env.removed_container[t].update(removed)
# rewards.append(len(removed))
#
# return rewards
#
#
#r = evaluate(pipe, env, .05)
#
#gre_n = get_n_matched(gre["matched"], 0, env.time_length)
#opt_n = get_n_matched(opt["matched"], 0, env.time_length)
#print("\nrewards\n",
# np.sum(r[500:]),
# np.sum(gre_n[500:]),
# np.sum(opt_n[500:]))
# | halflearned/organ-matching-rl | matching/temp/temp.py | temp.py | py | 3,852 | python | en | code | 2 | github-code | 36 |
21437705635 | from computer import TogglePuter
class BadSignal(Exception):
def __init__(self, signal):
self.message = str(signal)
class InfiniteLoop(Exception):
pass
class SignalPuter(TogglePuter):
def __init__(self):
super().__init__()
self.signal = []
def out(self, x):
value = self._get_value(x)
self.signal.append(value)
if value == len(self.signal) % 2:
raise BadSignal(self.signal)
if len(self.signal) > 10:
raise InfiniteLoop
self.pc += 1
def find_input(instructions):
aval = 0
while True:
try:
computer = SignalPuter()
computer.a = aval
computer.run(instructions, debug=False)
except BadSignal:
# print("{}: {}".format(aval, computer.signal))
aval += 1
except InfiniteLoop:
return aval
if __name__ == "__main__":
import doctest
doctest.testmod()
with open('25input.txt', 'r') as inputfile:
lines = inputfile.readlines()
print("Part 1: {}".format(find_input(lines)))
| philipdouglas/adventofcode | 2016/25.py | 25.py | py | 1,108 | python | en | code | 1 | github-code | 36 |
35779416431 | import time
import netsvc
from osv import fields,osv
class purchase_requisition(osv.osv):
_inherit = "purchase.requisition"
_description="Purchase Requisition"
_columns = {
'state': fields.selection([('draft','Draft'),('lv_approve2','Waitting Manager Approve'),('in_progress','In Progress'),('cancel','Cancelled'),('done','Done')], 'State', required=True)
}
_defaults = {
'date_start': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'state': 'draft',
'exclusive': 'multiple',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.requisition', context=c),
'user_id': lambda self, cr, uid, context: uid,
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'purchase.order.requisition'),
}
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'state':'draft',
'purchase_ids':[],
'name': self.pool.get('ir.sequence').get(cr, uid, 'purchase.order.requisition'),
})
return super(purchase_requisition, self).copy(cr, uid, id, default, context)
def tender_cancel(self, cr, uid, ids, context=None):
purchase_order_obj = self.pool.get('purchase.order')
for purchase in self.browse(cr, uid, ids, context=context):
for purchase_id in purchase.purchase_ids:
if str(purchase_id.state) in('draft','wait'):
purchase_order_obj.action_cancel(cr,uid,[purchase_id.id])
self.write(cr, uid, ids, {'state': 'cancel'})
return True
def tender_in_progress(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'lv_approve2'} ,context=context)
return True
def manager_approve(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'in_progress'} ,context=context)
return True
def tender_reset(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'draft'})
return True
def tender_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'done', 'date_end':time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
return True
purchase_requisition() | aryaadiputra/addons60_ptgbu_2013 | ad_purchase_requisition_double_validation/purchase_requisition.py | purchase_requisition.py | py | 2,365 | python | en | code | 0 | github-code | 36 |
7535403822 | from __future__ import print_function
#
# -*- coding: utf-8 -*-#
# eso.org
# Copyright 2011 ESO
# Authors:
# Lars Holm Nielsen <lnielsen@eso.org>
# Dirk Neumayer <dirk.neumayer@gmail.com>
#
#
# Mantis 12175: Fix release dates for images and videos
#
#
# Find wrong release_dates:
# 1) Check id with release_date - e.g. opo0214a with release date in 2010 must be wrong
# should be opoYYNNx NN is a cont. number?
# 2) Release dates with 2011-03-03 18:00-18:44 are wrong
#
# Don't bother about images connected to announcements, releases and potws.
#
# For images with long_caption_link or press_release_link follow the link and extract the date.
#
#
#*************************************************************************************************************
from future import standard_library
standard_library.install_aliases()
from djangoplicity.utils import optionparser
from djangoplicity.media.models import Image
from djangoplicity.media.models import Video
import re
import urllib.request, urllib.error, urllib.parse
import logging, sys
import socket
from datetime import datetime
import pytz
import hubblesite
def change_datetime(obj):
'''
follows the long_caption_link or the press_release_link to get the correct date
'''
# get link to image or press release
link = None
success = False
if obj.long_caption_link.find('http') > -1:
link = obj.long_caption_link
elif obj.press_release_link.find('http') > -1:
link = obj.press_release_link
# follow link and get new date
if link:
release_date = hubblesite.get_release_date(link)
if release_date:
try:
#print '-------------------------------------------------------'
#print obj.id, obj.release_date.strftime('%Y-%B-%d %I:%M %p %Z')
release_date = release_date.astimezone( pytz.timezone( 'Europe/Berlin' ) )
release_date = datetime.replace(release_date, tzinfo=None)
obj.release_date = release_date
#print obj.id, obj.release_date.strftime('%Y-%B-%d %I:%M %p %Z')
obj.save()
success = True
except:
print(obj.id,' save failed!')
pass
return success
def process_objects(objs):
'''
find the objects that need a correction of the release_date
'''
pat = re.compile('[a-zA-Z]+([0-9]{2})\S+')
count = 0
finddate1 = datetime.strptime('2011-03-03 18:00:00','%Y-%m-%d %H:%M:%S')
finddate2 = datetime.strptime('2011-03-03 19:00:00','%Y-%m-%d %H:%M:%S')
for obj in objs:
YY = None
dt = obj.release_date
if (dt):
# process all objects with 2011-03-03 18:00:00 - 19:00:00
if dt >= finddate1 and dt <= finddate2:
if change_datetime(obj): count = count + 1
print(obj.id, 'old: ', dt, '\t new: ', obj.release_date ,'\t\t reason: 20110303')
# process all objects where opoYY YY does not match the year of the release_date
else:
#only care about opo... and heic...
if obj.id.find('opo') == -1 and obj.id.find('heic') == -1: continue
YY = pat.findall(obj.id)
if len(YY) > 0:
YY = YY[0]
#print obj.id, YY, dt.strftime('%y'), dt
if YY != dt.strftime('%y'):
if change_datetime(obj): count = count + 1
print(obj.id, 'old: ', dt, '\t new: ', obj.release_date ,'\t\t reason: ', YY,' != ', dt.strftime('%y'))
else:
pass
#print obj.id, ' no release_date'
return count
if __name__ == '__main__':
logger = logging.getLogger('app.' + __name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stderr))
logger.propagate = False
logger.info("Fix release dates for images and videos")
# timeout in seconds
timeout = 60
socket.setdefaulttimeout(timeout)
test = '''<h2 class="release-number"><strong>News Release Number:</strong> STScI-2006-25</h2>'''
pattern = re.compile('''h2 class="release-number".*?:.*?>\s*(.*?)<.*?h2''')
print('videos')
print(process_objects(Video.objects.all()), ' videos have a new release_date')
print('images')
print(process_objects(Image.objects.all()), ' images have a new release_date')
| esawebb/esawebb | scripts/correct_dates.py | correct_dates.py | py | 4,493 | python | en | code | 0 | github-code | 36 |
28068719892 | stones = [2, 4, 5, 3, 2, 1, 4, 2, 5, 1]
k = 3
def check(stones, k, mid):
count = 0
for i in stones:
if i < mid:
count += 1
else:
count = 0
if count == k: # 뛰어 넘어야 하는 stone의 개수가 k개가 되면 건널 수 없다.
return 0
return 1
def solution(stones, k):
answer = 0
left = 1 # 가능한 최소 인원 1명
right = max(stones) + 1 # 가능하지 않은 최소 인원 (= 가능한 최대 인원 +1)
# 이분 탐색
while left <= right:
mid = (left + right) // 2
if check(stones, k, mid):
left = mid + 1
else:
right = mid - 1
answer = left - 1 # 마지막에 가능했던 인원
return answer
# 정확성은 완벽 하지만 효율성은 개똥
#
# stones = [2, 4, 5, 3, 2, 1, 4, 2, 5, 1]
# k = 3
#
# result = []
# for i in range(0, len(stones)-k+1):
# result.append(max(stones[i:i+k]))
# print(min(result))
| hwanginbeom/algorithm_study | 2.algorithm_test/21.03.21/징검다리 건너기_sejin.py | 징검다리 건너기_sejin.py | py | 987 | python | ko | code | 3 | github-code | 36 |
35751230673 | from util.data_util import read_energy_data
# best fit algorithm, which consistently choose the least frequency if applicable.
class bestReward():
def __init__(self, env, max_episode, ep_long):
self.env = env
self.last_deploy_core = 0
self.max_episode = max_episode
self.ep_long = ep_long
def saveResults(self):
self.env.saveResults()
print(f"Overall: {self.env.overall_results}")
print(f"Action choices: {self.env.action_choices}")
def act(self, s):
possible_actions = self.env.getPossibleActionGivenState(s)
action = 0
max_value = -100
for test_action in possible_actions:
if test_action == 0:
continue
test_value = self.env.calculateRewardOnAction(test_action)
# print(test_value)
if max_value < test_value:
max_value = test_value
action = test_action
return action
def test(self):
GHI_Data = read_energy_data(is_train=False)
done = False
ep_num = 0
self.env.replay(is_train=False, simulation_start=ep_num * self.ep_long,
simulation_end=(ep_num + 1) * self.ep_long, GHI_Data=GHI_Data)
print('\n\n\n--------------------------------------------------')
while ep_num < self.max_episode:
state = self.env.reset(is_train=False, simulation_start=ep_num * self.ep_long,
simulation_end=(ep_num + 1) * self.ep_long, GHI_Data=GHI_Data)
done = False
while not done:
action = self.act(state)
next_state, reward, done = self.env.step(action)
state = next_state
print(f'Episode test {ep_num}')
self.saveResults()
ep_num += 1
# self.env.event_queue.print_queue()
| Tahuubinh/Adaptive_processor_frequency_IoT_offloading | code/schedule/best_reward.py | best_reward.py | py | 1,907 | python | en | code | 0 | github-code | 36 |
10535469198 | import pygame
import os
from sys import exit
WIDTH, HEIGHT = 1600, 900
pygame.init()
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Buildings")
WHITE = (255, 255, 255)
FPS = 60
indx = 0
font = pygame.font.Font(None, 50)
class Building():
def __init__(self, name, offset_x, offset_y):
self.name = name
self.building = pygame.image.load(os.path.join(
"assets/buildings", self.name)).convert_alpha()
self.offset_x = offset_x
self.offset_y = offset_y
self.x = offset_x
self.y = offset_y
def color_key(self, color):
self.building.set_colorkey(color)
def draw(self, WIN, camera_position):
self.VEL = 20
self.x = -camera_position[0]+self.offset_x
self.y = -camera_position[1]+self.offset_y
WIN.blit(self.building, (self.x, self.y))
def movements(self, keys_pressed):
if keys_pressed is None:
return
if keys_pressed[pygame.K_a]:
self.x += self.VEL
if keys_pressed[pygame.K_LSHIFT]:
self.x += self.VEL
elif keys_pressed[pygame.K_d]:
self.x -= self.VEL
if keys_pressed[pygame.K_LSHIFT]:
self.x -= self.VEL
def movements_updown(self, keys_pressed):
if keys_pressed[pygame.K_w]:
self.y += self.VEL
if keys_pressed[pygame.K_LSHIFT]:
self.y += self.VEL
elif keys_pressed[pygame.K_s]:
self.y -= self.VEL
if keys_pressed[pygame.K_LSHIFT]:
self.y -= self.VEL
def collision_bound(self, camera_position, x_fac, y_fac, width_fac, height_fac, show=False):
self.x = -camera_position[0]+self.offset_x
self.y = -camera_position[1]+self.offset_y
object_collision_bound = pygame.Rect(
self.x+x_fac, self.y+y_fac, width_fac, height_fac)
if show is True:
pygame.draw.rect(WIN, (0, 255, 0), object_collision_bound)
def main():
building1 = Building("4(1).png", -100, -1250)
building2 = Building("5(1).png", 2000, -1800)
building3 = Building("6.png", 5500, -1700)
clock = pygame.time.Clock()
while True:
WIN.fill(WHITE)
clock.tick(FPS)
for event in pygame.event.get():
keys_pressed = pygame.key.get_pressed()
if event.type == pygame.QUIT:
pygame.quit()
exit()
building1.draw(WIN, [0, 0])
building1.movements(keys_pressed)
building1.movements_updown(keys_pressed)
building2.draw(WIN, [0, 0])
building2.movements(keys_pressed)
building2.movements_updown(keys_pressed)
building3.draw(WIN, [0, 0])
building3.movements(keys_pressed)
building3.movements_updown(keys_pressed)
pygame.display.update()
if __name__ == "__main__":
main()
| Hemant-29/pygame-project | building.py | building.py | py | 2,913 | python | en | code | 0 | github-code | 36 |
41198927211 | import logging
import json
from lxml import etree
def get_field_text(tree, path):
nsmap = {"n1": tree.getroot().nsmap['n1']}
node = tree.xpath(path, namespaces=nsmap)
if len(node) > 0:
return node[0].text
return ''
def parse_metadata(scene, xml_filename, json_filename):
logger = logging.getLogger(scene)
logger.info("Parsing XML metadata from {0}".format(xml_filename))
result = {'!scene': scene}
tree = etree.parse(xml_filename)
with open(json_filename, 'r') as myfile:
tile_json = myfile.read()
tile = json.loads(tile_json)
scene_time = get_field_text(tree, "n1:General_Info/SENSING_TIME")
result['acquired_date'] = scene_time.split('T')[0]
result['acquired_time'] = scene_time.split('T')[1]
coords = tile['tileGeometry']['coordinates'][0]
result["#scene_corner_UL_x"] = coords[0][0]
result["#scene_corner_UL_y"] = coords[0][1]
result["#scene_corner_UR_x"] = coords[1][0]
result["#scene_corner_UR_y"] = coords[1][1]
result["#scene_corner_LR_x"] = coords[2][0]
result["#scene_corner_LR_y"] = coords[2][1]
result["#scene_corner_LL_x"] = coords[3][0]
result["#scene_corner_LL_y"] = coords[3][1]
result["#utm_zone"] = tile["utmZone"]
return result
| amy-langley/irma-import | xml_operations.py | xml_operations.py | py | 1,267 | python | en | code | 0 | github-code | 36 |
16435010481 | import unittest
from selectors.NumbersFormRangeSelector import NumbersFormRangeSelector
class TestNumbersFormRangeSelector(unittest.TestCase):
def test_should_return_empty_sequence_when_empty_sequence_is_given(self):
empty_sequence = []
selector = NumbersFormRangeSelector(1, 10)
self.assertTrue(len(selector.select(empty_sequence)) == 0)
def test_should_return_empty_sequence_when_there_are_no_numbers_from_range(self):
empty_sequence = [3, 5, 355, 321, 45]
selector = NumbersFormRangeSelector(-8, 0)
self.assertTrue(len(selector.select(empty_sequence)) == 0)
def test_should_return_all_numbers_from_range(self):
sequence = [1, 2, 3, 6, 4, 15, 23]
correct_result_sequence = [2, 3, 6, 4]
selector = NumbersFormRangeSelector(2, 6)
self.assertEqual(correct_result_sequence, selector.select(sequence))
def test_should_throw_exception_when_wrong_range_is_given(self):
with self.assertRaises(ValueError):
NumbersFormRangeSelector(100, 6)
if __name__ == '__main__':
unittest.main()
| stardreamer/patterns | Behavioral/strategy/examples/selection/Python/Selector/tests/NumbersFormRangeSelectorTests.py | NumbersFormRangeSelectorTests.py | py | 1,106 | python | en | code | 0 | github-code | 36 |
13015450298 | import asyncio
import websockets
HOST = '0.0.0.0'
WS_PORT = 3333
TCP_PORT = 8888
loop = asyncio.get_event_loop()
websocket_connections = []
tcp_connections = []
async def send_status(status: str):
data = status.encode()
for c in tcp_connections:
try:
writer = c[1]
writer.write(data)
await writer.drain()
except Exception as e:
print(e)
async def executor(command: str):
command = command.lower()
words = command.split(" ")
print(words)
if 'вправо' in words:
await send_status('d')
if 'право' in words:
await send_status('d')
if 'права' in words:
await send_status('d')
if 'cправа' in words:
await send_status('d')
if 'влево' in words:
await send_status('a')
if 'лево' in words:
await send_status('a')
if 'лего' in words:
await send_status('a')
if 'лева' in words:
await send_status('a')
if 'назад' in words:
await send_status('s')
if 'вперед' in words:
await send_status('w')
if 'перед' in words:
await send_status('w')
async def websock_handler(websocket, path):
print('WS connect')
global websocket_connections
websocket_connections.append(websocket)
try:
while True:
msg = await websocket.recv()
print('[MSG INCOMING]', msg)
await executor(msg)
except websockets.exceptions.ConnectionClosedOK as e:
pass
websocket_connections.remove(websocket)
print('WS disc')
async def tcp_handler(reader, writer):
print('connected to ue')
global tcp_connections
connection = (reader, writer)
tcp_connections.append(connection)
writer.write("ping".encode())
while True:
data = await reader.read(100)
if len(data) == 0:
break
await writer.drain()
writer.close()
tcp_connections.remove(connection)
print('disconnected UE')
async def run_ws():
await websockets.serve(websock_handler, HOST, WS_PORT)
async def run_tcp():
await asyncio.start_server(tcp_handler, HOST, TCP_PORT, loop=loop)
def main():
loop.create_task(run_ws())
loop.create_task(run_tcp())
try:
loop.run_forever()
except KeyboardInterrupt:
print("stoped")
if __name__ == '__main__':
main()
| DeadMorose777/UE4_SpeechController | speech_controller-main/host2.py | host2.py | py | 2,119 | python | en | code | 0 | github-code | 36 |
36779117268 | # NOTE: mini function for testing your UDP connection w the computer running the server and MAX
from pythonosc import udp_client
PORT_TO_MAX = 5002
IP = "192.168.2.2"
global client
client = udp_client.SimpleUDPClient(IP, PORT_TO_MAX)
input("hello")
while True:
print("sent")
client.send_message("/point", 1)
input("pause") | mshen63/RoboticMusicianship_CV_Project | oldReferenceFiles/socketTrial.py | socketTrial.py | py | 336 | python | en | code | 0 | github-code | 36 |
37486686803 | import random
def find_duplicate(xs):
mini, maxi, acc = xs[0], xs[0], xs[0]
for i in range(1, len(xs)):
mini = min(mini, xs[i])
maxi = max(maxi, xs[i])
acc = acc ^ xs[i]
mask = mini
for i in range(mini + 1, maxi + 1):
mask = mask ^ i
return mask ^ acc
xs = [5, 3, 4, 1, 5, 2]
print(xs)
result = find_duplicate(xs)
print(result)
| tvl-fyi/depot | users/wpcarro/scratch/facebook/find-unique-int-among-duplicates.py | find-unique-int-among-duplicates.py | py | 382 | python | en | code | 0 | github-code | 36 |
11032982168 | """
Given a singly linked list, determine if it is a palindrome
"""
class Solution(object):
def isPalindrome(self, head):
fast = slow = head
# Move slow to the middle of the list
while fast and slow:
fast = fast.next.next
slow = slow.next
# Reverse second half
node = None
while slow:
nxt = slow.next
# Make slow.next None/end
slow.next = node
node = slow
slow = nxt
while node:
if node.val != head.val:
return False
node = node.next
head = head.next
return True
| tonydelanuez/python-ds-algos | probs/palindrome-linked-list.py | palindrome-linked-list.py | py | 523 | python | en | code | 0 | github-code | 36 |
34898939242 | import numpy as np
import librosa
from typing import List
import matplotlib.pyplot as plt
from scipy import signal
from scipy.fft import rfft, rfftfreq
import os
TECHNIQUES = ['High', 'Tasto', 'Bend', 'Harm', 'Strum', 'Pont', 'Ord', 'Chord', 'Smack', 'Palm', 'TEST', 'SILENCE']
#TECHNIQUES = os.listdir("samples/manual") + ["SILENCE"]
# ['Bend', 'Chord', 'Harm', 'High', 'Ord', 'Palm', 'Pont', 'Smack', 'Strum', 'Tasto', 'TEST', 'SILENCE']
# high tasto bend harm strum pont ord chord smack palm
def find_onsets(y: np.ndarray, sr: int) -> np.ndarray:
"""Takes a numpy array and returns an array of onsets, currenly using librosa"""
#return librosa.onset.onset_detect(y, sr=sr, backtrack=True, units="samples")
o_env = librosa.onset.onset_strength(y=y, sr=sr, max_size=8)
samps = librosa.samples_like(o_env)
return librosa.onset.onset_detect(onset_envelope=o_env, sr=sr, backtrack=True, units="samples",
delta=4.3, hop_length=512, normalize=False,
pre_max = 1.0, post_max = 1.0, pre_avg = 4.0, post_avg = 5.0, wait = 1.0)
def get_waveform_from_ndarray(audio: np.ndarray, tf):
audio = tf.convert_to_tensor(audio)
tf.cast(audio, tf.float32)
return audio
def get_waveform_from_bin(wfbin, tf):
"""Returns a tf tensor float32 waveform from a binary file"""
audio, _ = tf.audio.decode_wav(wfbin) # somewhere here it breaks.......
tf.cast(audio, tf.float32)
return tf.squeeze(audio, axis=-1)
def get_waveform_from_path(path: str, tf):
"""Returns a tf tensor float32 waveform from a path"""
wfbin = tf.io.read_file(path)
return get_waveform_from_bin(wfbin, tf)
def get_spectrogram(waveform, tf):
"""Takes a tf.float32 waveform and returns a spectrogram. Max size = 16000 samples"""
if tf.shape(waveform) > 16000:
waveform = waveform[:16000]
zero_padding = tf.zeros([16000] - tf.shape(waveform), dtype=tf.float32) #fix this so the padding isn't huge
waveform = tf.cast(waveform, tf.float32)
equal_length = tf.concat([waveform, zero_padding], 0)
spectrogram = tf.signal.stft(
equal_length, frame_length=255, frame_step=128)
spectrogram = tf.abs(spectrogram)
spectrogram = tf.expand_dims(spectrogram, -1)
return spectrogram
def numpy_to_tfdata(note: np.ndarray, tf):
"""Turn a numpy buffer note into a tensorflow dataset of the spectrogram"""
waveform = get_waveform_from_ndarray(note, tf)
spec = get_spectrogram(waveform, tf)
ds = tf.data.Dataset.from_tensors([spec])
return ds
def int_to_string_results(int_results: List[int], techniques: List[str]) -> List[str]:
return list(map(lambda i: techniques[i], int_results))
def prediction_to_int_ranks(prediction, tf):
sftmax = tf.nn.softmax(prediction[0])
sorted = np.sort(sftmax)[::-1]
index_of = lambda x: np.where(sftmax == x)[0][0]
prediction_ranks = list(map(index_of, sorted))
return prediction_ranks
def plot_prediction(techniques, prediction, tf):
"""view a matplotlib graph of the prediction"""
plt.bar(techniques, tf.nn.softmax(prediction[0]))
plt.title(f'Predictions for new note:')
plt.show()
def note_above_threshold(note: np.ndarray) -> bool:
"""Checks if the peak of a note is above a set threshold"""
if np.max(np.abs(note)) > 0.09:
return True
else:
return False
def get_partials(waveform: np.ndarray, sr: int) -> List[float]:
normalized_wf = np.int16((waveform / waveform.max()) * 32767)
N = len(normalized_wf)
yf = rfft(normalized_wf)
xf = rfftfreq(N, 1 / sr)
half = len(xf) // 2
peak_sig = np.abs(yf[:half])
peaks, d = signal.find_peaks(peak_sig, height=100000, distance=250) # This can be tweaked for better results
peaks_amps = np.array(list(map(lambda p: [p, peak_sig[p]], peaks)))
sorted_peaks = peaks_amps[peaks_amps[:, 1].argsort()][::-1]
sorted_freqs = list(map(lambda i: xf[int(i)], sorted_peaks[:, 0]))
sorted_freqs = filter(lambda freq: freq > 80, sorted_freqs)
return list(sorted_freqs)
if __name__ == "__main__":
print(TECHNIQUES) | trian-gles/ai-technique-classification | utilities/analysis.py | analysis.py | py | 4,193 | python | en | code | 0 | github-code | 36 |
73175113705 | import pandas as pd
import numpy as np
from warnings import simplefilter
simplefilter(action="ignore", category=pd.errors.PerformanceWarning)
# ---------------------------------------------------------------------------
# Per-residue physicochemical lookup tables used by the feature generators
# below.  Each dict maps a one-letter amino acid code to a scalar property.
# ---------------------------------------------------------------------------
# The 20 standard amino acids (one-letter codes); this fixed order also fixes
# the column order of the AAC/DPC/NBP/CBP feature tables.
aaLi = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
aaChargeDi = {'A': 0, 'C': 0, 'D': -1, 'E': -1, 'F': 0, 'G': 0, 'H': 0, 'I': 0, 'K': 1.0,
              'L': 0, 'M': 0, 'N': 0, 'P': 0, 'Q': 0, 'R': 1.0, 'S': 0, 'T': 0, 'V': 0,
              'W': 0, 'Y': 0} # positive: R,K negative: D,E neutral: others
aaHydroIdxDi = {'I': 4.5, 'V': 4.2, 'L': 3.8, 'F': 2.8, 'C': 2.5, 'M': 1.9, 'A': 1.8, 'G': -0.4, 'T': -0.7, 'W': -0.9,
                'S': -0.8, 'Y': -1.3, 'P': -1.6, 'H': -3.2, 'E': -3.5, 'Q': -3.5, 'D': -3.5, 'N': -3.5, 'K': -3.9,
                'R': -4.5} # hydropathy index (GRAVY index)
aaHyd1Di = {'A': 0.62, 'C': 0.29, 'D': -0.9, 'E': -0.74, 'F': 1.19, 'G': 0.48, 'H': -0.4, 'I': 1.38, 'K': -1.5,
            'L': 1.06, 'M': 0.64, 'N': -0.78, 'P': 0.12, 'Q': -0.85, 'R': -2.53, 'S': -0.18, 'T': -0.05, 'V': 1.08,
            'W': 0.81, 'Y': 0.26} # hydrophobicity
aaHyd2Di = {'A': -0.5, 'C': -1, 'D': 3, 'E': 3, 'F': -2.5, 'G': 0, 'H': -0.5, 'I': -1.8, 'K': 3,
            'L': -1.8, 'M': -1.3, 'N': 2, 'P': 0, 'Q': 0.2, 'R': 3, 'S': 0.3, 'T': -0.4, 'V': -1.5,
            'W': -3.4, 'Y': -2.3} # hydrophilicity
aaPIDi = {'A': 6, 'C': 5.07, 'D': 2.77, 'E': 3.22, 'F': 5.48, 'G': 5.97, 'H': 7.59, 'I': 6.02, 'K': 9.74,
          'L': 5.98, 'M': 5.74, 'N': 5.41, 'P': 6.3, 'Q': 5.65, 'R': 10.76, 'S': 5.68, 'T': 5.6, 'V': 5.96,
          'W': 5.89, 'Y': 5.66} # isoelectric point
aaPKa1Di = {'A': 2.34, 'C': 1.96, 'D': 1.88, 'E': 2.19, 'F': 1.83, 'G': 2.34, 'H': 1.82, 'I': 2.36, 'K': 2.18,
            'L': 2.36, 'M': 2.28, 'N': 2.02, 'P': 1.99, 'Q': 2.17, 'R': 2.17, 'S': 2.21, 'T': 2.09, 'V': 2.32,
            'W': 2.83, 'Y': 2.2} # pKa1: carboxyl group (-COOH)
aaPKa2Di = {'A': 9.69, 'C': 8.18, 'D': 9.6, 'E': 9.67, 'F': 9.13, 'G': 9.6, 'H': 9.17, 'I': 9.6, 'K': 8.95,
            'L': 9.6, 'M': 9.21, 'N': 8.8, 'P': 10.6, 'Q': 9.13, 'R': 9.04, 'S': 9.15, 'T': 9.1, 'V': 9.62,
            'W': 9.39, 'Y': 9.11} # pKa2: ammonium group (NH2-)
# residue volume (units not stated here -- presumably cubic angstroms; verify
# against the original data source)
aaVolumeDi = {'A': 91.5, 'R': 196.1, 'N': 138.3, 'D': 135.2, 'C': 102.4, 'Q': 156.4, 'E': 154.6, 'G': 67.5, 'H': 163.2,
              'I': 162.6, 'L': 163.4, 'K': 162.5, 'M': 165.9, 'F': 198.8, 'P': 123.4, 'S': 102.0, 'T': 126.0,
              'W': 237.2, 'Y': 209.8, 'V': 138.4}
aaVSCDi = {'A': 27.5, 'C': 44.6, 'D': 40, 'E': 62, 'F': 115.5, 'G': 0, 'H': 79, 'I': 93.5, 'K': 100, 'L': 93.5,
           'M': 94.1, 'N': 58.7, 'P': 41.9, 'Q': 80.7, 'R': 105, 'S': 29.3, 'T': 51.3, 'V': 71.5, 'W': 145.5, 'Y': 117.3} # volume of side chain
aaGappDi = {'A': 0.11, 'R': 2.58, 'N': 2.05, 'D': 3.49, 'C': -0.13, 'Q': 2.36, 'E': 2.68, 'G': 0.74, 'H': 2.06, 'I': -0.6,
            'L': -0.55, 'K': 2.71, 'M': -0.1, 'F': -0.32, 'P': 2.23, 'S': 0.84, 'T': 0.52, 'W': 0.3, 'Y': 0.68, 'V': -0.31} # free energy of transmembrane helix
aaPol1Di = {'A': 8.1, 'C': 5.5, 'D': 13, 'E': 12.3, 'F': 5.2, 'G': 9, 'H': 10.4, 'I': 5.2, 'K': 11.3, 'L': 4.9,
            'M': 5.7, 'N': 11.6, 'P': 8, 'Q': 10.5, 'R': 10.5, 'S': 9.2, 'T': 8.6, 'V': 5.9, 'W': 5.4, 'Y': 6.2} # polarity
aaPol2Di = {'A': 0.046, 'C': 0.128, 'D': 0.105, 'E': 0.151, 'F': 0.29, 'G': 0, 'H': 0.23, 'I': 0.186, 'K': 0.219, 'L': 0.186,
            'M': 0.221, 'N': 0.134, 'P': 0.131, 'Q': 0.18, 'R': 0.291, 'S': 0.062, 'T': 0.108, 'V': 0.14, 'W': 0.409, 'Y': 0.298} # polarizability
aaNCISDi = {'A': 0.007187, 'C': -0.03661, 'D': -0.02382, 'E': -0.006802, 'F': 0.037552, 'G': 0.179052, 'H': -0.01069,
            'I': 0.021631, 'K': 0.017708, 'L': 0.051672, 'M': 0.002683, 'N': 0.005392, 'P': 0.239531, 'Q': 0.049211,
            'R': 0.043587, 'S': 0.004627,
            'T': 0.003352, 'V': 0.057004, 'W': 0.037977, 'Y': 0.0323599} # net charge index of side chain
aaSASADi = {'A': 1.181, 'C': 1.461, 'D': 1.587, 'E': 1.862, 'F': 2.228, 'G': 0.881, 'H': 2.025, 'I': 1.81, 'K': 2.258,
            'L': 1.931, 'M': 2.034, 'N': 1.655, 'P': 1.468, 'Q': 1.932, 'R': 2.56, 'S': 1.298, 'T': 1.525, 'V': 1.645,
            'W': 2.663, 'Y': 2.368} # solvent accessibility of surface area
## Given a dictionary of peptide sequences and a list of feature names, this function returns a feature tables for the peptides.
## The acceptable feature names include AAC, DPC, NBP, CBP, Hydro, Hyd1, Hyd2, FEtmh, Pol1, Pol2, Vol, VSC, SA, pI, Chg, NCIS, pKa1, pKa2.
def GenerateFeatureTableGivenSeqDiAndFeatureLi(seq_di, feature_list, islabeled=True):
    """Build a feature table for the peptide sequences in *seq_di*.

    Parameters
    ----------
    seq_di : dict
        Maps entry name -> peptide sequence.
    feature_list : list of str
        Feature names to generate.  Accepted names: AAC, DPC, NBP, CBP,
        Hydro, Hyd1, Hyd2, FEtmh, Pol1, Pol2, Vol, VSC, SASA/SA, pI,
        Chg/Charge, NCIS, pKa1, pKa2.
    islabeled : bool
        Forwarded to GenerateInitialSequenceDataframe to derive TrueLabel
        columns from the entry-name prefixes.

    Returns
    -------
    pandas.DataFrame with one row per peptide and one column group per
    requested feature.
    """
    # Ordered dispatch table: (accepted aliases, generator function).
    # The tuple order reproduces the historical if-chain order, so the
    # resulting column order is unchanged.
    dispatch = (
        (('Hydro',), GenerateFeature_HydropathyIndex),
        (('Hyd1',), GenerateFeature_Hydrophobicity),
        (('Hyd2',), GenerateFeature_Hydrophilicity),
        (('FEtmh',), GenerateFeature_FreeEnergyTMH),
        (('Pol1',), GenerateFeature_Polarity),
        (('Pol2',), GenerateFeature_Polarizability),
        (('Vol',), GenerateFeature_Volume),
        (('VSC',), GenerateFeature_VolumeSideChain),
        (('SASA', 'SA'), GenerateFeature_SolventAccessibleSurfaceArea),
        (('pI',), GenerateFeature_pHatIsoelectricPoint),
        (('Chg', 'Charge'), GenerateFeature_AminoAcidCharge),
        (('NCIS',), GenerateFeature_NetChargeIndexOfSideChain),
        (('pKa2',), GenerateFeature_pKaNH2),
        (('pKa1',), GenerateFeature_pKaCOOH),
        (('AAC',), GenerateFeature_AAC),
        (('DPC',), GenerateFeature_DPC),
        (('CBP',), GenerateFeature_CBP),
        (('NBP',), GenerateFeature_NBP),
    )
    df = GenerateInitialSequenceDataframe(seq_di, islabeled=islabeled)
    for aliases, generator in dispatch:
        if any(alias in feature_list for alias in aliases):
            generator(df)
    return df
def GenerateInitialSequenceDataframe(seq_dict, tag_pos='pos_', tag_neg='neg_', islabeled=True):
    """Create the base dataframe (Name, Sequence[, TrueLabel], Length).

    Entries whose name starts with `tag_pos` are labeled 1, everything else 0
    (`tag_neg` is kept for API compatibility but never consulted).
    Returns None for an empty input dict.
    """
    if len(seq_dict) == 0:
        return None
    frame = pd.DataFrame(list(seq_dict.items()), columns=['Name', 'Sequence'])
    if islabeled is True:
        # label is derived purely from the entry-name prefix
        frame['TrueLabel'] = [1 if name.startswith(tag_pos) else 0 for name in frame['Name']]
    frame['Length'] = frame['Sequence'].str.len()
    return frame
## amino acid composition (dimension 20)
def GenerateFeature_AAC(df):
    """Append one 'AAC_<res>' column per residue in aaLi: the fraction of
    the sequence made up of that residue.  No-op if df has no 'Sequence'."""
    if 'Sequence' not in df.columns:
        return
    for aa in aaLi:
        df['AAC_' + aa] = df['Sequence'].apply(lambda seq: seq.count(aa) / len(seq))
## dipeptide composition (dimension 400)
def GenerateFeature_DPC(df):
    """Append one 'DP_<aa1><aa2>' column per ordered residue pair.

    Each value is the frequency of the dipeptide among the len(s) - 1
    adjacent pairs of the sequence.  Fixes two defects of the original:
    - str.count only finds non-overlapping matches, undercounting
      homo-dipeptides (e.g. 'AAA'.count('AA') == 1, but 'AAA' contains
      two AA pairs);
    - length-1 sequences caused a ZeroDivisionError (they now yield 0.0).
    """
    if 'Sequence' not in df.columns:
        return

    def _dpc_freq(s, dp):
        # count overlapping adjacent pairs explicitly
        n_pairs = len(s) - 1
        if n_pairs <= 0:
            return 0.0
        return sum(1 for i in range(n_pairs) if s[i:i + 2] == dp) / n_pairs

    for residue1 in aaLi:
        for residue2 in aaLi:
            dp = residue1 + residue2
            df['DP_' + dp] = df['Sequence'].map(lambda s: _dpc_freq(s, dp))
## N-terminal binary profile with k amino acids (dimension k*20)
def GenerateFeature_NBP(df, kmer=8):
    """One-hot profile of the first `kmer` residues of each sequence.

    Column 'N<kmer>mer_<i>_<res>' is 1 iff position i (1-based from the
    N-terminus) holds residue <res>.  Sequences shorter than `kmer` now get
    all-zero columns for the missing positions (the original raised
    IndexError on them).
    """
    if 'Sequence' not in df.columns:
        return
    for i in range(1, kmer + 1):
        pos = i - 1
        for residue in aaLi:
            labelName = 'N' + str(kmer) + 'mer_' + str(i) + '_' + residue
            df[labelName] = df['Sequence'].map(
                lambda s: 1 if pos < len(s) and s[pos] == residue else 0)
## C-terminal binary profile with k amino acids (dimension k*20)
def GenerateFeature_CBP(df, kmer=8):
    """One-hot profile of the last `kmer` residues of each sequence.

    Column 'C<kmer>mer_<i>_<res>' is 1 iff the residue `kmer - i + 1`
    positions from the C-terminus is <res> (pos is a negative index).
    Sequences shorter than `kmer` now get all-zero columns for the missing
    positions (the original raised IndexError on them).
    """
    if 'Sequence' not in df.columns:
        return
    for i in range(1, kmer + 1):
        pos = i - kmer - 1  # negative index in [-kmer, -1]
        for residue in aaLi:
            labelName = 'C' + str(kmer) + 'mer_' + str(i) + '_' + residue
            df[labelName] = df['Sequence'].map(
                lambda s: 1 if len(s) >= -pos and s[pos] == residue else 0)
## The functions below create features by averaging the specific physicochemical properties of amino acids in peptide sequences
## Each wrapper adds exactly one column to df (named after the property),
## holding the sequence-averaged value from the corresponding residue lookup
## table via ComputeAveragedIndex.  All are no-ops when df lacks 'Sequence'.
def GenerateFeature_AminoAcidCharge(df):
    if 'Sequence' not in df.columns:
        return
    df['Chg'] = df['Sequence'].map(lambda s: ComputeAveragedIndex(s, aaChargeDi))
def GenerateFeature_NetChargeIndexOfSideChain(df):
    if 'Sequence' not in df.columns:
        return
    df['NCIS'] = df['Sequence'].map(lambda s: ComputeAveragedIndex(s, aaNCISDi))
def GenerateFeature_HydropathyIndex(df):
    if 'Sequence' not in df.columns:
        return
    df['Hydro'] = df['Sequence'].map(lambda s: ComputeAveragedIndex(s, aaHydroIdxDi))
def GenerateFeature_Hydrophobicity(df):
    if 'Sequence' not in df.columns:
        return
    df['Hyd1'] = df['Sequence'].map(lambda s: ComputeAveragedIndex(s, aaHyd1Di))
def GenerateFeature_Hydrophilicity(df):
    if 'Sequence' not in df.columns:
        return
    df['Hyd2'] = df['Sequence'].map(lambda s: ComputeAveragedIndex(s, aaHyd2Di))
def GenerateFeature_pHatIsoelectricPoint(df):
    if 'Sequence' not in df.columns:
        return
    df['pI'] = df['Sequence'].map(lambda s: ComputeAveragedIndex(s, aaPIDi))
def GenerateFeature_pKaCOOH(df):
    if 'Sequence' not in df.columns:
        return
    df['pKa1'] = df['Sequence'].map(lambda s: ComputeAveragedIndex(s, aaPKa1Di))
def GenerateFeature_pKaNH2(df):
    if 'Sequence' not in df.columns:
        return
    df['pKa2'] = df['Sequence'].map(lambda s: ComputeAveragedIndex(s, aaPKa2Di))
def GenerateFeature_Volume(df):
    if 'Sequence' not in df.columns:
        return
    df['Vol'] = df['Sequence'].map(lambda s: ComputeAveragedIndex(s, aaVolumeDi))
def GenerateFeature_VolumeSideChain(df):
    if 'Sequence' not in df.columns:
        return
    df['VSC'] = df['Sequence'].map(lambda s: ComputeAveragedIndex(s, aaVSCDi))
def GenerateFeature_FreeEnergyTMH(df):
    if 'Sequence' not in df.columns:
        return
    df['FEtmh'] = df['Sequence'].map(lambda s: ComputeAveragedIndex(s, aaGappDi))
def GenerateFeature_Polarity(df):
    if 'Sequence' not in df.columns:
        return
    df['Pol1'] = df['Sequence'].map(lambda s: ComputeAveragedIndex(s, aaPol1Di))
def GenerateFeature_Polarizability(df):
    if 'Sequence' not in df.columns:
        return
    df['Pol2'] = df['Sequence'].map(lambda s: ComputeAveragedIndex(s, aaPol2Di))
def GenerateFeature_SolventAccessibleSurfaceArea(df):
    if 'Sequence' not in df.columns:
        return
    df['SA'] = df['Sequence'].map(lambda s: ComputeAveragedIndex(s, aaSASADi))
def ComputeAveragedIndex(seq, aa_index_dict):
    """Return the mean of aa_index_dict[residue] over the residues of seq.

    Args:
        seq: peptide sequence (any iterable of residue characters).
        aa_index_dict: residue -> numeric property value.

    Returns:
        float: the per-residue average; 0.0 for an empty sequence
        (the original raised ZeroDivisionError on empty input).

    Raises:
        KeyError: if seq contains a residue missing from the table.
    """
    if not seq:
        return 0.0
    return sum(aa_index_dict[residue] for residue in seq) / len(seq)
| comics-asiis/ToxicPeptidePrediction | program_resource/extractfeature.py | extractfeature.py | py | 10,874 | python | en | code | 0 | github-code | 36 |
21334783707 | from os.path import join, dirname
from pandas import read_csv
from pathlib import Path
from climateeconomics.core.core_agriculture.crop import Crop
from sostrades_core.execution_engine.execution_engine import ExecutionEngine
from sostrades_core.tests.core.abstract_jacobian_unit_test import AbstractJacobianUnittest
from energy_models.core.stream_type.energy_models.biomass_dry import BiomassDry
from climateeconomics.sos_wrapping.sos_wrapping_agriculture.crop.crop_disc import CropDiscipline
import unittest
import pandas as pd
import numpy as np
class AgricultureJacobianDiscTest(AbstractJacobianUnittest):
    """Analytic-gradient check for the Crop discipline.

    setUp assembles every discipline input (diet, population, temperature,
    investment and transport assumptions); the test then compares the
    discipline's analytic jacobian against complex-step finite differences
    via AbstractJacobianUnittest.check_jacobian.
    """
    #AbstractJacobianUnittest.DUMP_JACOBIAN = True
    def setUp(self):
        """Initialize third data needed for testing."""
        self.name = 'Test'
        self.ee = ExecutionEngine(self.name)
        self.year_start = 2020
        self.year_end = 2055
        self.time_step = 1
        years = np.arange(self.year_start, self.year_end + 1, 1)
        year_range = self.year_end - self.year_start + 1
        # constant 7800 (millions) population over the whole horizon
        population = np.array(np.linspace(7800, 7800, year_range))
        self.population_df = pd.DataFrame(
            {"years": years, "population": population})
        self.population_df.index = years
        temperature = np.array(np.linspace(1.05, 5, year_range))
        self.temperature_df = pd.DataFrame(
            {"years": years, "temp_atmo": temperature})
        self.temperature_df.index = years
        lifetime = 50
        # Age distribution of forests in 2008 (
        initial_age_distribution = pd.DataFrame({'age': np.arange(1, lifetime),
                                                 'distrib': [0.16, 0.24, 0.31, 0.39, 0.47, 0.55, 0.63, 0.71, 0.78, 0.86,
                                                             0.94, 1.02, 1.1, 1.18, 1.26, 1.33, 1.41, 1.49, 1.57, 1.65,
                                                             1.73, 1.81, 1.88, 1.96, 2.04, 2.12, 2.2, 2.28, 2.35, 2.43,
                                                             2.51, 2.59, 2.67, 2.75, 2.83, 2.9, 2.98, 3.06, 3.14, 3.22,
                                                             3.3, 3.38, 3.45, 3.53, 3.61, 3.69, 3.77, 3.85, 3.92]})
        # m2 of land needed per kg of each food category
        self.default_kg_to_m2 = {'red meat': 360,
                                 'white meat': 16,
                                 'milk': 8.95,
                                 'eggs': 6.3,
                                 'rice and maize': 2.9,
                                 'potatoes': 0.88,
                                 'fruits and vegetables': 0.8,
                                 'other': 21.4,
                                 }
        # kcal per kg of each food category
        self.default_kg_to_kcal = {'red meat': 2566,
                                   'white meat': 1860,
                                   'milk': 550,
                                   'eggs': 1500,
                                   'rice and maize': 1150,
                                   'potatoes': 670,
                                   'fruits and vegetables': 624,
                                   }
        red_meat_percentage = np.linspace(6, 1, year_range)
        white_meat_percentage = np.linspace(14, 5, year_range)
        self.red_meat_percentage = pd.DataFrame({
            'years': years,
            'red_meat_percentage': red_meat_percentage})
        self.white_meat_percentage = pd.DataFrame({
            'years': years,
            'white_meat_percentage': white_meat_percentage})
        # base-year diet in kg per person per year
        self.diet_df = pd.DataFrame({'red meat': [11.02],
                                     'white meat': [31.11],
                                     'milk': [79.27],
                                     'eggs': [9.68],
                                     'rice and maize': [97.76],
                                     'potatoes': [32.93],
                                     'fruits and vegetables': [217.62],
                                     })
        self.other = np.array(np.linspace(0.102, 0.102, year_range))
        # investment: 1Mha of crop land each year
        self.crop_investment = pd.DataFrame(
            {'years': years, 'investment': np.ones(len(years)) * 0.381})
        self.margin = pd.DataFrame(
            {'years': years, 'margin': np.ones(len(years)) * 110.0})
        # From future of hydrogen
        self.transport_cost = pd.DataFrame(
            {'years': years, 'transport': np.ones(len(years)) * 7.6})
        # bioenergyeurope.org : Dedicated energy crops
        # represent 0.1% of the total biomass production in 2018
        energy_crop_percentage = 0.005
        # ourworldindata, average cereal yield: 4070kg/ha +
        # average yield of switchgrass on grazing lands: 2565,67kg/ha
        # residue is 0.25 more than that
        density_per_ha = 2903 * 1.25
        # available ha of crop: 4.9Gha, initial prod = crop energy + residue for
        # energy of all surfaces
        self.initial_production = 4.8 * density_per_ha * 3.6 * energy_crop_percentage  # in TWh
        # full parameter set consumed by the Crop discipline
        self.param = {'year_start': self.year_start,
                      'year_end': self.year_end,
                      'time_step': self.time_step,
                      'diet_df': self.diet_df,
                      'kg_to_kcal_dict': self.default_kg_to_kcal,
                      'population_df': self.population_df,
                      'temperature_df': self.temperature_df,
                      'kg_to_m2_dict': self.default_kg_to_m2,
                      'red_meat_percentage': self.red_meat_percentage,
                      'white_meat_percentage': self.white_meat_percentage,
                      'other_use_crop': self.other,
                      'param_a': - 0.00833,
                      'param_b': - 0.04167,
                      'crop_investment': self.crop_investment,
                      'margin': self.margin,
                      'transport_margin': self.margin,
                      'transport_cost': self.transport_cost,
                      'data_fuel_dict': BiomassDry.data_energy_dict,
                      'techno_infos_dict': CropDiscipline.techno_infos_dict_default,
                      'scaling_factor_crop_investment': 1e3,
                      'initial_age_distrib': initial_age_distribution,
                      'initial_production': self.initial_production
                      }
    def analytic_grad_entry(self):
        """Declare the gradient tests run by the AbstractJacobianUnittest driver."""
        return [
            self.test_agriculture_discipline_analytic_grad
        ]
    def test_agriculture_discipline_analytic_grad(self):
        """Build the Crop discipline, execute it, and check its analytic
        jacobian against complex-step finite differences."""
        self.model_name = 'crop'
        ns_dict = {'ns_public': f'{self.name}',
                   'ns_witness': f'{self.name}',
                   'ns_functions': f'{self.name}',
                   'ns_biomass_dry': f'{self.name}',
                   'ns_land_use':f'{self.name}',
                   'ns_crop':f'{self.name}',
                   'ns_invest':f'{self.name}'}
        self.ee.ns_manager.add_ns_def(ns_dict)
        mod_path = 'climateeconomics.sos_wrapping.sos_wrapping_agriculture.crop.crop_disc.CropDiscipline'
        builder = self.ee.factory.get_builder_from_module(
            self.model_name, mod_path)
        self.ee.factory.set_builders_to_coupling_builder(builder)
        self.ee.configure()
        self.ee.display_treeview_nodes()
        values_dict = {f'{self.name}.year_start': self.year_start,
                       f'{self.name}.year_end': self.year_end,
                       f'{self.name}.{self.model_name}.diet_df': self.diet_df,
                       f'{self.name}.{self.model_name}.kg_to_kcal_dict': self.default_kg_to_kcal,
                       f'{self.name}.{self.model_name}.kg_to_m2_dict': self.default_kg_to_m2,
                       f'{self.name}.population_df': self.population_df,
                       f'{self.name}.temperature_df': self.temperature_df,
                       f'{self.name}.red_meat_percentage': self.red_meat_percentage,
                       f'{self.name}.white_meat_percentage': self.white_meat_percentage,
                       f'{self.name}.{self.model_name}.{Crop.OTHER_USE_CROP}': self.other,
                       f'{self.name}.crop_investment': self.crop_investment,
                       f'{self.name}.margin': self.margin,
                       f'{self.name}.transport_margin': self.margin,
                       f'{self.name}.transport_cost': self.transport_cost,
                       f'{self.name}.data_fuel_dict': BiomassDry.data_energy_dict
                       }
        self.ee.load_study_from_input_dict(values_dict)
        self.ee.execute()
        disc_techno = self.ee.root_process.proxy_disciplines[0].mdo_discipline_wrapp.mdo_discipline
        # complex-step reference jacobian, compared against the stored pickle
        self.check_jacobian(location=dirname(__file__), filename=f'jacobian_crop_discipline.pkl', discipline=disc_techno, local_data=disc_techno.local_data,
                            step=1e-15, derr_approx='complex_step',
                            inputs=[f'{self.name}.population_df',
                                    f'{self.name}.temperature_df',
                                    f'{self.name}.red_meat_percentage',
                                    f'{self.name}.white_meat_percentage',
                                    f'{self.name}.crop_investment',
                                    ],
                            outputs=[f'{self.name}.total_food_land_surface',
                                     f'{self.name}.land_use_required',
                                     f'{self.name}.techno_prices',
                                     f'{self.name}.techno_production',
                                     f'{self.name}.techno_consumption',
                                     f'{self.name}.techno_consumption_woratio',
                                     f'{self.name}.CO2_emissions',
                                     f'{self.name}.CO2_land_emission_df',
                                     f'{self.name}.CH4_land_emission_df',
                                     f'{self.name}.N2O_land_emission_df',
                                     ])
| os-climate/witness-core | climateeconomics/tests/l1_test_gradient_crop_discipline.py | l1_test_gradient_crop_discipline.py | py | 10,121 | python | en | code | 7 | github-code | 36 |
import json
from datetime import date, timedelta

# Location of the blog's journal data file.
path = '/Users/evcu/GitHub/evcu.github.io//assets/nyc365blog/data.json'


def main():
    """Prompt for one journal entry and append it to the JSON data file.

    The entry is dated exactly one year before today (the blog replays
    days with a one-year delay).
    """
    newel = {u'date': str(date.today() - timedelta(days=365))}
    # input() already returns str; the original's str(...) wrappers were redundant.
    newel[u'mood'] = input("Enter mood -1/0/1:\n")
    print(newel)
    newel[u'high'] = input("Highlights\n")
    newel[u'low'] = input("Lowlights:\n")
    newel[u'other'] = input("Other:\n")
    newel[u'text'] = input("Random Thoughts:\n")
    print(newel)
    with open(path, 'r') as data_file:
        print('Successfuly read')  # message kept byte-identical (sic)
        data = json.load(data_file)
    data.append(newel)
    with open(path, 'w') as data_file:
        json.dump(data, data_file, sort_keys=True, indent=4)
        print('Successfuly written')


if __name__ == "__main__":
    main()
| evcu/evcu.github.io | assets/nyc365blog/newDay.py | newDay.py | py | 704 | python | en | code | 1 | github-code | 36 |
class Solution:
    def deleteAndEarn(self, nums: List[int]) -> int:
        """Maximum points when taking value v earns v * count(v) but deletes
        every occurrence of v - 1 and v + 1.

        Aggregates points per distinct value, then runs an iterative
        house-robber DP over the value domain 1..max(nums).

        Improvements over the original:
        - drops the unnecessary O(n log n) sort (aggregation needs no order);
        - bottom-up DP instead of memoized recursion, which recursed
          max(nums) frames deep and hit RecursionError for large values.
        """
        if not nums:
            return 0
        points = defaultdict(int)
        for value in nums:
            points[value] += value
        # prev2 / prev1: best totals using values < v - 1 / < v respectively.
        prev2, prev1 = 0, 0
        for v in range(1, max(points) + 1):
            prev2, prev1 = prev1, max(prev1, prev2 + points[v])
        return prev1
| gourab337/leetcode | DP/deleteAndEarn.py | deleteAndEarn.py | py | 562 | python | en | code | 0 | github-code | 36 |
23597687031 | import sieve
from typing import Dict, List
import os
from dotenv import load_dotenv
# Load SIEVE_* settings from a local .env file into the process environment,
# then configure the sieve client from them.
load_dotenv()
api_key = os.environ.get('SIEVE_API_KEY')  # NOTE(review): fetched but never used below
sieve.SIEVE_API_KEY = os.getenv('SIEVE_API_KEY')
sieve.SIEVE_API_URL = os.getenv('SIEVE_API_URL')
@sieve.Model(
    name="deepface-emotion-detector",
    gpu = True,
    python_packages=[
        "opencv-python==4.6.0.66",
        "tensorflow==2.11.0",
        "pandas==1.5.3",
        "numpy==1.24.2",
        "deepface==0.0.79",
        'mediapipe==0.9.0'
    ],
    python_version="3.8",
)
class EmotionDetector:
    """Sieve model: detect faces with MediaPipe, then classify each crop
    with DeepFace's 7-class emotion network."""
    def __setup__(self):
        """One-time init: build the DeepFace 'Emotion' model and the
        MediaPipe face detector (confidence threshold 0.7)."""
        from deepface import DeepFace
        self.model = DeepFace.build_model('Emotion')
        # Load the weights from the saved H5 file
        # index -> label mapping matching the model's 7 output classes
        self.emotion_labels = {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy', 4: 'neutral', 5: 'sad', 6: 'surprise'}
        import mediapipe as mp
        self.mp_face_detection = mp.solutions.face_detection
        self.face_detection = self.mp_face_detection.FaceDetection(min_detection_confidence=0.7)
    def detect_faces(self, img: sieve.Image):
        """Run MediaPipe face detection on img.

        Returns a list of dicts with keys: array (cropped face pixels),
        box ([x, y, w, h] in pixels), class_name, score, frame_number.
        Assumes img.array is BGR (it is converted BGR->RGB for MediaPipe).
        """
        import cv2
        import numpy as np
        results = self.face_detection.process(cv2.cvtColor(img.array, cv2.COLOR_BGR2RGB))
        faces = []
        if results.detections:
            for detection in results.detections:
                # MediaPipe returns relative coordinates; scale to pixels
                bounding_box = detection.location_data.relative_bounding_box
                x = int(bounding_box.xmin * img.width)
                w = int(bounding_box.width * img.width)
                y = int(bounding_box.ymin * img.height)
                h = int(bounding_box.height * img.height)
                detected_face = img.array[y : y + h, x : x + w]
                face_array = np.array(detected_face)
                bbox = [x, y, w, h]
                faces.append({
                    "array": face_array,
                    "box": bbox,
                    "class_name": "face",
                    "score": detection.score[0],
                    "frame_number": None if not hasattr(img, "frame_number") else img.frame_number
                })
        return faces
    def __predict__(self, img: sieve.Image) -> List:
        """Detect faces in img and return one dict per face with its box,
        detection score, dominant emotion label, and emotion confidence."""
        import tensorflow as tf
        import numpy as np
        from deepface import DeepFace
        import cv2
        outputs = []
        faces = self.detect_faces(img)
        for face in faces:
            face_img = face['array']
            #preprocess the face image
            if face_img is not None and np.any(face_img):
                # NOTE(review): detect_faces treats img.array as BGR but the
                # crop is converted here with COLOR_RGB2GRAY — confirm which
                # channel order img.array actually uses.
                gray_face = cv2.cvtColor(face_img, cv2.COLOR_RGB2GRAY)
                resized_face = cv2.resize(gray_face, (48, 48))
                # NOTE(review): resnet50.preprocess_input expects 3-channel RGB
                # input; verify this matches the emotion model's training pipeline.
                preprocessed_face = tf.keras.applications.resnet50.preprocess_input(resized_face)
                preprocessed_face = np.expand_dims(preprocessed_face, axis=0)
                #predict the emotion of the face image
                emotions = self.model.predict(preprocessed_face)[0]
                labels = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
                dominant_emotion = np.argmax(emotions)
                emotion_label = self.emotion_labels[dominant_emotion]
                confidence = emotions[dominant_emotion]
                outputs.append({
                    "frame_number": face['frame_number'],
                    "class_name": "face",
                    "box": face["box"],
                    "score": face["score"],
                    "emotion": emotion_label,
                    "confidence": confidence
                })
        return outputs
| GauravMohan1/sieve_emotion_face_tracker | main.py | main.py | py | 3,571 | python | en | code | 0 | github-code | 36 |
38568004929 | '''
113
Given a binary tree and a sum, find all root-to-leaf paths where each path's sum equals the given sum.
Note: A leaf is a node with no children.
Example:
Given the below binary tree and sum = 22,
5
/ \
4 8
/ / \
11 13 4
/ \ / \
7 2 5 1
Return:
[
[5,4,11,2],
[5,8,4,5]
]
'''
class TreeNode():
    """A binary-tree node: an integer value plus left/right child links."""

    def __init__(self, val=0, left=None, right=None):
        # Default construction yields a value-0 leaf.
        self.val, self.left, self.right = val, left, right
def pathSum(root, sum, nlist, all_paths):
    """Return all root-to-leaf paths whose node values add up to `sum`.

    Args:
        root: current node (or None).  Note: `sum` shadows the builtin but is
            kept for call compatibility.
        sum: remaining target along the current path.
        nlist: values accumulated from the root down to `root`'s parent.
        all_paths: previously collected paths; callers should pass [].
            (It is threaded unchanged into both subtree calls, so passing a
            non-empty list would duplicate its entries in the result.)

    Returns:
        list[list[int]]: every matching path, left subtree first.
    """
    if root is None:
        return all_paths
    is_leaf = root.left is None and root.right is None
    if is_leaf and root.val == sum:
        return all_paths + [nlist + [root.val]]
    # Non-leaf (or non-matching leaf): recurse into both subtrees and merge.
    left = pathSum(root.left, sum - root.val, nlist + [root.val], all_paths)
    right = pathSum(root.right, sum - root.val, nlist + [root.val], all_paths)
    return left + right
'''
calls nlist + [root.val] changes made in that call
1 [] [1]
2 [1]+[2] [1,2]
3 [1,2]+[3] [1,2,3]
4 [1,2,3] [1,2]
5 [1,2] [1,2,4]
6 [1] []
pathSum(1,7,[],[]):------------1
left = pathSum(2,6,[1],[])--------------2
left = pathSum(3,4,[1,2],[])-----------3
pathsum(none,1,[1,2,3],[])------------4
left = [] # all_paths (here we get empty path because we have not hit the right path that satisfies the condition)
right = pathSum(4,4,[1,2],[])--------------------5
return all_paths + [nlist + [root.val]]
return [] + [[1,2] + [4]]
right = [[1,2,4]]
return left+right = [] + [[1,2,4]] = [[1,2,4]]
left = [[1,2,4]]
right = pathSum(6,6,[1],[[1,2,4]])--------------6
return all_paths + [nlist + [root.val]]
return [] + [[1] + [6]]
return [] + [[1,6]]
return [] + [[1,6]]
return [[1,6]]
right = [[1,6]]
return left + right
[[1,2,4]] + [[1,6]]
[[1,2,4],[1,6]]
'''
# Build the sample tree:
#         1
#        / \
#       2   6
#      / \
#     4   3
#          \
#           1
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(6)
root.left.left = TreeNode(4)
root.left.right = TreeNode(3)
root.left.right.right = TreeNode(1)
# Prints every root-to-leaf path summing to 7:
# [[1, 2, 4], [1, 2, 3, 1], [1, 6]]
# (the walkthrough comment above omits the [1, 2, 3, 1] path, which also sums to 7)
print(pathSum(root,7,[], [] ))
16321882534 | import logging
from mmpose.apis.inference import inference_top_down_pose_model, init_pose_model, vis_pose_result
from mmpose.datasets import DatasetInfo
# Module-level logger; handlers/level are configured by the host application.
logger = logging.getLogger(__name__)
def loadModel(configPath, ckptPath, device, half):
    """Build an mmpose top-down model plus its dataset metadata.

    Args:
        configPath: path to the mmpose config file.
        ckptPath: path to the checkpoint to load.
        device: torch device; stringified and lower-cased for mmpose.
        half: accepted for API compatibility; not used here.

    Returns:
        (model, dataset type string, DatasetInfo instance or None).
    """
    pose_model = init_pose_model(configPath, ckptPath, str(device).lower())
    test_cfg = pose_model.cfg.data['test']
    dataset = test_cfg['type']
    dataset_info = test_cfg.get('dataset_info', None)
    if dataset_info is None:
        logger.warning(
            'Please set `dataset_info` in the config.'
            'Check https://github.com/open-mmlab/mmpose/pull/663 for details.'
        )
    else:
        dataset_info = DatasetInfo(dataset_info)
    return pose_model, dataset, dataset_info
def inference(model, image, plotted, bboxes, dataset, dataset_info, device, half):
    """Run top-down pose estimation on `image` for the given xywh boxes and
    draw the results onto `plotted`.

    Returns:
        points: dict mapping track id -> [bbox, keypoints].
            NOTE(review): assumes each pose result carries a 'track_id' key,
            i.e. the boxes come from a tracker — confirm with the caller.
        plotted: the input canvas with boxes and skeletons drawn.

    `device` and `half` are accepted but not used in this function.
    """
    # pose estimate
    pose_results, returned_outputs = inference_top_down_pose_model(
        model,
        image,
        bboxes,
        format='xywh',
        dataset=dataset,
        dataset_info=dataset_info
    )
    points = {d['track_id']: [d['bbox'], d['keypoints']] for d in pose_results}
    # plot bboxes and skeletons
    plotted = vis_pose_result(
        model,
        plotted,
        pose_results,
        dataset=dataset,
        dataset_info=dataset_info,
        kpt_score_thr=0.3,
        radius=4,
        thickness=1,
        show=False,
        out_file=None
    )
    return points, plotted
| ideguchi92/assignment | src/vitposeModule.py | vitposeModule.py | py | 1,323 | python | en | code | 0 | github-code | 36 |
# 5x5 LED matrix encoded as colon-separated rows of brightness digits (all off).
light_matrix_string = "00000:00000:00000:00000:00000"
# Convert light matrix to a multidimensional array
def convert_light_string_to_array(array_str):
    """Parse a 'digits:digits:...' matrix string into a list of rows,
    each row being a list of ints."""
    return [[int(digit) for digit in row] for row in array_str.split(':')]
# Convert Light Array to a string to pass to lego hub
def convert_matrix_to_string(matrix):
    """Serialize a list-of-rows matrix back into the 'digits:digits' form."""
    rows = (''.join(str(cell) for cell in row) for row in matrix)
    return ':'.join(rows)
# Manipulate the string directly
def toggle_light_by_index(matrix_str, row, col, brightness):
    """Return a copy of matrix_str with pixel (row, col) replaced by brightness.

    Args:
        matrix_str: colon-separated rows of single-character brightness values.
        row, col: zero-based pixel coordinates.
        brightness: single-character replacement value (as a string).

    The row width is derived from the string itself (position of the first
    colon) instead of the original hard-coded 5-wide assumption, so any
    rectangular matrix works; 5x5 inputs behave exactly as before.
    """
    row_width = matrix_str.index(':') if ':' in matrix_str else len(matrix_str)
    index = row * (row_width + 1) + col  # +1 skips the ':' separator per row
    return matrix_str[:index] + brightness + matrix_str[index + 1:]
# for loop example
# range(start at, end before, increment)
for i in range(0,8,1): # counts 0-7 (range excludes the stop value; the original comment said 1-7)
    print('the current index (i) is:', i)
print('\n')
print('light matrix as a string:')
print(light_matrix_string)
print('\n')
print('light matrix as a matrix:')
print (convert_light_string_to_array(light_matrix_string))
| igMike-V/kids-python-challenges | legoDimensions/lightMatrix.py | lightMatrix.py | py | 1,159 | python | en | code | 0 | github-code | 36 |
35196657682 | import os
import dataclasses
import unittest
import torch
import math
import copy
import numpy as np
import lpips
from dataset.co3d_dataset import Co3dDataset, FrameData
from dataset.dataset_zoo import DATASET_ROOT
from tools.utils import dataclass_to_cuda_
from evaluation.evaluate_new_view_synthesis import eval_batch
from tools.metric_utils import calc_psnr, eval_depth
from models.model_dbir import ModelDBIR
class TestEvaluation(unittest.TestCase):
    """Unit tests for the new-view-synthesis evaluation metrics (depth error,
    PSNR) and the full eval_batch pipeline on CO3D sequences."""
    def setUp(self):
        """Load the 'skateboard' CO3D dataset and the LPIPS (VGG) model."""
        # initialize evaluation dataset/dataloader
        torch.manual_seed(42)
        category = "skateboard"
        dataset_root = DATASET_ROOT
        frame_file = os.path.join(dataset_root, category, "frame_annotations.jgz")
        sequence_file = os.path.join(dataset_root, category, "sequence_annotations.jgz")
        self.image_size = 256
        self.dataset = Co3dDataset(
            frame_annotations_file=frame_file,
            sequence_annotations_file=sequence_file,
            dataset_root=dataset_root,
            image_height=self.image_size,
            image_width=self.image_size,
            box_crop=True,
            load_point_clouds=True,
        )
        self.bg_color = 0.0
        # init the lpips model for eval
        self.lpips_model = lpips.LPIPS(net="vgg")
    def test_eval_depth(self):
        """
        Check that eval_depth correctly masks errors and that, for get_best_scale=True,
        the error with scaled prediction equals the error without scaling the
        predicted depth. Finally, test that the error values are as expected
        for prediction and gt differing by a constant offset.
        """
        gt = (torch.randn(10, 1, 300, 400, device="cuda") * 5.0).clamp(0.0)
        mask = (torch.rand_like(gt) > 0.5).type_as(gt)
        for diff in 10 ** torch.linspace(-5, 0, 6):
            for crop in (0, 5):
                pred = gt + (torch.rand_like(gt) - 0.5) * 2 * diff
                # scaled prediction test: errors must be invariant to a
                # global scale of the prediction when get_best_scale=True
                mse_depth, abs_depth = eval_depth(
                    pred,
                    gt,
                    crop=crop,
                    mask=mask,
                    get_best_scale=True,
                )
                mse_depth_scale, abs_depth_scale = eval_depth(
                    pred * 10.0,
                    gt,
                    crop=crop,
                    mask=mask,
                    get_best_scale=True,
                )
                self.assertAlmostEqual(
                    float(mse_depth.sum()), float(mse_depth_scale.sum()), delta=1e-4
                )
                self.assertAlmostEqual(
                    float(abs_depth.sum()), float(abs_depth_scale.sum()), delta=1e-4
                )
                # error masking test: corrupt only pixels outside the mask,
                # so the masked error must be ~0 ...
                pred_masked_err = gt + (torch.rand_like(gt) + diff) * (1 - mask)
                mse_depth_masked, abs_depth_masked = eval_depth(
                    pred_masked_err,
                    gt,
                    crop=crop,
                    mask=mask,
                    get_best_scale=True,
                )
                self.assertAlmostEqual(
                    float(mse_depth_masked.sum()), float(0.0), delta=1e-4
                )
                self.assertAlmostEqual(
                    float(abs_depth_masked.sum()), float(0.0), delta=1e-4
                )
                # ... while the complementary mask must see the corruption
                mse_depth_unmasked, abs_depth_unmasked = eval_depth(
                    pred_masked_err,
                    gt,
                    crop=crop,
                    mask=1 - mask,
                    get_best_scale=True,
                )
                self.assertGreater(
                    float(mse_depth_unmasked.sum()),
                    float(diff ** 2),
                )
                self.assertGreater(
                    float(abs_depth_unmasked.sum()),
                    float(diff),
                )
                # tests with constant error: a fixed offset of `diff` inside
                # the mask must yield exactly diff / diff**2 (up to masking)
                pred_fix_diff = gt + diff * mask
                for _mask_gt in (mask, None):
                    mse_depth_fix_diff, abs_depth_fix_diff = eval_depth(
                        pred_fix_diff,
                        gt,
                        crop=crop,
                        mask=_mask_gt,
                        get_best_scale=False,
                    )
                    if _mask_gt is not None:
                        expected_err_abs = diff
                        expected_err_mse = diff ** 2
                    else:
                        # without an explicit mask, eval_depth averages over
                        # valid (gt > 0) pixels only; rescale expectations
                        err_mask = (gt > 0.0).float() * mask
                        if crop > 0:
                            err_mask = err_mask[:, :, crop:-crop, crop:-crop]
                            gt_cropped = gt[:, :, crop:-crop, crop:-crop]
                        else:
                            gt_cropped = gt
                        gt_mass = (gt_cropped > 0.0).float().sum(dim=(1, 2, 3))
                        expected_err_abs = (
                            diff * err_mask.sum(dim=(1, 2, 3)) / (gt_mass)
                        )
                        expected_err_mse = diff * expected_err_abs
                    self.assertTrue(
                        torch.allclose(
                            abs_depth_fix_diff,
                            expected_err_abs * torch.ones_like(abs_depth_fix_diff),
                            atol=1e-4,
                        )
                    )
                    self.assertTrue(
                        torch.allclose(
                            mse_depth_fix_diff,
                            expected_err_mse * torch.ones_like(mse_depth_fix_diff),
                            atol=1e-4,
                        )
                    )
    def test_psnr(self):
        """
        Compare against opencv and check that the psnr is above
        the minimum possible value.
        """
        import cv2
        im1 = torch.rand(100, 3, 256, 256).cuda()
        for max_diff in 10 ** torch.linspace(-5, 0, 6):
            im2 = im1 + (torch.rand_like(im1) - 0.5) * 2 * max_diff
            im2 = im2.clamp(0.0, 1.0)
            # check that our psnr matches the output of opencv
            psnr = calc_psnr(im1, im2)
            psnr_cv2 = cv2.PSNR(
                im1.cpu().numpy().astype(np.float64),
                im2.cpu().numpy().astype(np.float64),
                1.0,
            )
            self.assertAlmostEqual(float(psnr), float(psnr_cv2), delta=1e-4)
            # check that all psnrs are bigger than the minimum possible psnr
            max_mse = max_diff ** 2
            min_psnr = 10 * math.log10(1.0 / max_mse)
            for _im1, _im2 in zip(im1, im2):
                _psnr = calc_psnr(_im1, _im2)
                self.assertTrue(float(_psnr) >= min_psnr)
    def _one_sequence_test(
        self,
        seq_dataset,
        n_batches=2,
        min_batch_size=5,
        max_batch_size=10,
    ):
        """Run DBIR new-view synthesis on random batches of one sequence and
        check eval_batch ranks it better than a deliberately corrupted
        prediction on every metric."""
        # form a list of random batches
        batch_indices = []
        for bi in range(n_batches):
            batch_size = torch.randint(
                low=min_batch_size, high=max_batch_size, size=(1,)
            )
            batch_indices.append(torch.randperm(len(seq_dataset))[:batch_size])
        loader = torch.utils.data.DataLoader(
            seq_dataset,
            # batch_size=1,
            shuffle=False,
            batch_sampler=batch_indices,
            collate_fn=FrameData.collate,
        )
        model = ModelDBIR(image_size=self.image_size, bg_color=self.bg_color)
        model.cuda()
        self.lpips_model.cuda()
        for frame_data in loader:
            self.assertIsNone(frame_data.frame_type)
            # override the frame_type: first frame is the evaluation target,
            # the rest are source views
            frame_data.frame_type = [
                "train_unseen",
                *(["train_known"] * (len(frame_data.image_rgb) - 1)),
            ]
            # move frame_data to gpu
            frame_data = dataclass_to_cuda_(frame_data)
            preds = model(**dataclasses.asdict(frame_data))
            nvs_prediction = copy.deepcopy(preds["nvs_prediction"])
            eval_result = eval_batch(
                frame_data,
                nvs_prediction,
                bg_color=self.bg_color,
                lpips_model=self.lpips_model,
            )
            # Make a terribly bad NVS prediction and check that this is worse
            # than the DBIR prediction.
            nvs_prediction_bad = copy.deepcopy(preds["nvs_prediction"])
            nvs_prediction_bad.depth_render += (
                torch.randn_like(nvs_prediction.depth_render) * 100.0
            )
            nvs_prediction_bad.image_render += (
                torch.randn_like(nvs_prediction.image_render) * 100.0
            )
            nvs_prediction_bad.mask_render = (
                torch.randn_like(nvs_prediction.mask_render) > 0.0
            ).float()
            eval_result_bad = eval_batch(
                frame_data,
                nvs_prediction_bad,
                bg_color=self.bg_color,
                lpips_model=self.lpips_model,
            )
            # metric name -> True if lower values are better
            lower_better = {
                "psnr": False,
                "psnr_fg": False,
                "depth_abs_fg": True,
                "iou": False,
                "rgb_l1": True,
                "rgb_l1_fg": True,
            }
            for metric in lower_better.keys():
                m_better = eval_result[metric]
                m_worse = eval_result_bad[metric]
                if m_better != m_better or m_worse != m_worse:
                    continue  # metric is missing, i.e. NaN
                _assert = (
                    self.assertLessEqual
                    if lower_better[metric]
                    else self.assertGreaterEqual
                )
                _assert(m_better, m_worse)
    def test_full_eval(self, n_sequences=5):
        """Test evaluation."""
        for seq, idx in list(self.dataset.seq_to_idx.items())[:n_sequences]:
            seq_dataset = torch.utils.data.Subset(self.dataset, idx)
            self._one_sequence_test(seq_dataset)
| eldar/snes | 3rdparty/co3d/tests/test_evaluation.py | test_evaluation.py | py | 10,050 | python | en | code | 59 | github-code | 36 |
11704013339 | # -*- coding: utf-8 -*-
"""Translator module"""
from __future__ import division
from data import NUMBERS
class Translator(object):
    """Converts arbitrary input to an integer, and (inside a `with` block)
    to its English spelling using the NUMBERS lookup table.

    NOTE(review): the `long` reference in translate() implies Python 2; on
    Python 3 any non-str input raises NameError instead of TypeError.
    """
    @classmethod
    def translate(cls, data):
        """Convert input data to a number: digits extracted from strings,
        numeric input passed through; anything else raises TypeError."""
        if isinstance(data, str):
            str_result = "".join(char for char in data if char.isdigit())
            value = int(str_result) if str_result else 0
        elif isinstance(data, (int, float, long)):
            value = data
        else:
            raise TypeError("Expected {} or {} / {}. But taken {}".format(str, int, float, data))
        return value
    def __enter__(self):
        # Inside the context, translate() produces spelled-out words
        # instead of a numeric value.
        self.translate = self.context_translate
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # NOTE(review): returning True unconditionally suppresses ALL
        # exceptions raised inside the `with` block — confirm intended.
        return True
    @classmethod
    def context_translate(cls, data):
        """Translate input to its English spelling (sign + words)."""
        value = cls.translate(data)
        sign = "" if value >= 0 else NUMBERS["minus"]
        abs_value = abs(value)
        # values below 100 with a direct table entry are looked up as-is
        if value in NUMBERS and value < 100:
            str_value = NUMBERS[abs_value]
        else:
            str_value = " ".join(cls._get_power_thousand(abs_value))
        return " ".join((sign, str_value))
    @classmethod
    def _get_power_thousand(cls, value):
        """Yield the spelling of each thousand-power group, most significant
        first (e.g. '<group> million', '<group> thousand', '<group>')."""
        while value >= 1:
            len_value = len(str(value))
            # largest power of 1000 not exceeding the value
            power_thousand = 10 ** ((len_value - 1) // 3 * 3)
            value_under_thousand = value // power_thousand
            str_value_under_thousand = " ".join(cls._get_under_thousand(value_under_thousand))
            str_power_thousand = NUMBERS[power_thousand] if power_thousand > 1 else ""
            str_value = " ".join((str_value_under_thousand, str_power_thousand))
            value -= value_under_thousand * power_thousand
            yield str_value
    @classmethod
    def _get_under_thousand(cls, value):
        """Yield the word pieces for a value below 1000: hundreds (with
        'and'), then direct table hits, then tens."""
        while value >= 1:
            if value >= 100:
                quantity_hundreds = value // 100
                value -= quantity_hundreds * 100
                str_and = NUMBERS["and"] if value > 0 else ""
                str_value = " ".join((NUMBERS[quantity_hundreds], NUMBERS[100], str_and))
            elif value in NUMBERS:
                str_value = NUMBERS[value]
                value = 0
            else:
                value_tens = value // 10 * 10
                str_value = NUMBERS[value_tens]
                value = value - value_tens
            yield str_value
| russtanevich/num_converter | translator.py | translator.py | py | 2,596 | python | en | code | 0 | github-code | 36 |
1124194920 | #!/usr/bin/python3
from models.rectangle import Rectangle
class Square(Rectangle):
    """This represents a Square, inheriting from Rectangle."""

    def __init__(self, size, x=0, y=0, id=None):
        """This initializes a new Square.

        Args:
            size (int): The size of the Square.
            x (int): The x-coordinate of the Square's position.
            y (int): The y-coordinate of the Square's position.
            id (int): The identity of the Square.
        """
        super().__init__(size, size, x, y, id)

    @property
    def size(self):
        """This gets the size of the Square.

        Returns:
            int: The size of the Square.
        """
        return self.width

    @size.setter
    def size(self, value):
        """This sets the size of the Square.

        Args:
            value (int): The size value to set.
        """
        # A square's sides are equal; Rectangle's setters validate the value.
        self.width = value
        self.height = value

    def __str__(self):
        """This returns a string representation of the Square.

        Returns:
            str: The string representation of the Square.
        """
        return "[Square] ({}) {}/{} - {}".format(
            self.id, self.x, self.y, self.width
        )

    def update(self, *args, **kwargs):
        """This updates the attributes of the Square.

        Positional arguments are consumed in the order id, size, x, y;
        extra positional arguments are now ignored (the original raised
        IndexError past the fourth).  Keyword arguments are only used
        when no positional arguments are given.

        Args:
            *args: List of arguments.
            **kwargs: Dictionary of keyword arguments.
        """
        if args:
            attrs = ["id", "size", "x", "y"]
            for i, value in enumerate(args):
                if i < len(attrs):
                    setattr(self, attrs[i], value)
        else:
            for key, value in kwargs.items():
                setattr(self, key, value)

    def to_dictionary(self):
        """This returns the dictionary representation of the Square.

        Returns:
            dict: The dictionary representation of the Square.
        """
        return {
            "id": self.id,
            "size": self.size,
            "x": self.x,
            "y": self.y,
        }
| Fran6ixneymar/alx-higher_level_programming | 0x0C-python-almost_a_circle/models/square.py | square.py | py | 2,094 | python | en | code | 0 | github-code | 36 |
74260920742 | import numpy as np
import torch
import copy
from pathlib import Path
from torch_scatter import scatter
from typing import Dict, Tuple
from pcdet.datasets.v2x_sim.v2x_sim_dataset_ego import V2XSimDataset_EGO, get_pseudo_sweeps_of_1lidar, get_nuscenes_sensor_pose_in_global, apply_se3_
from pcdet.datasets.v2x_sim.v2x_sim_utils import roiaware_pool3d_utils
class V2XSimDataset_EGO_LATE(V2XSimDataset_EGO):
    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
        """Late-fusion variant of the ego V2X-Sim dataset.

        Delegates all setup to V2XSimDataset_EGO and only enforces that the
        dataset is in evaluation mode: late fusion (merging cached per-agent
        predictions) is supported for validation only.
        """
        super().__init__(dataset_cfg, class_names, training, root_path, logger)
        assert self.mode == 'test', f"late fusion only support validation"
def _get_prediction_ego(self, sample_token: str) -> np.ndarray:
path_modar = self.exchange_database / f"{sample_token}_id1_modar.pth"
modar = torch.load(path_modar, map_location=torch.device('cpu')).numpy() if path_modar.exists() else np.zeros(1, 9)
return modar
    @torch.no_grad()
    def _get_prediction_agent(self, lidar_id: int, lidar_token: str, sample_token: str, exchange_setting: str) -> Tuple[np.ndarray]:
        """
        Predictions are in agent's frame

        Loads the agent's cached MoDAR boxes and, for exchange_setting ==
        'prev', propagates them forward using the cached per-point scene
        flow of the foreground points inside each box.

        Returns:
            modar: (N_modar, 9) numpy array — box-7, score, label;
                empty (0, 9) when no cached prediction exists.
            glob_se3_lidar: (4, 4) pose of this lidar in the global frame.
        """
        assert exchange_setting in ('now', 'prev')
        glob_se3_lidar = get_nuscenes_sensor_pose_in_global(self.nusc, lidar_token)  # (4, 4)
        path_modar = self.exchange_database / f"{sample_token}_id{lidar_id}_modar.pth"
        if path_modar.exists():
            modar = torch.load(path_modar)  # on gpu, (N_modar, 7 + 2) - box-7, score, label
            # ---
            # propagate modar forward
            path_foregr = self.exchange_database / f"{sample_token}_id{lidar_id}_foreground.pth"
            if path_foregr.exists() and exchange_setting == 'prev':
                foregr = torch.load(path_foregr)  # on gpu, (N_fore, 5 + 2 + 3 + 3) - point-5, sweep_idx, inst_idx, cls_prob-3, flow-3
            # pool: assign each foreground point to the box containing it
                box_idx_of_foregr = roiaware_pool3d_utils.points_in_boxes_gpu(
                    foregr[:, :3].unsqueeze(0), modar[:, :7].unsqueeze(0)
                ).squeeze(0).long()  # (N_foregr,) | == -1 mean not belong to any boxes
                mask_valid_foregr = box_idx_of_foregr > -1
                foregr = foregr[mask_valid_foregr]
                box_idx_of_foregr = box_idx_of_foregr[mask_valid_foregr]
                unq_box_idx, inv_unq_box_idx = torch.unique(box_idx_of_foregr, return_inverse=True)
                # weighted sum of foregrounds' offset; weights = foreground's prob dynamic
                # NOTE(review): mean per-box flow doubled — presumably extrapolating
                # one frame ahead at constant velocity; confirm the factor 2.
                boxes_offset = scatter(foregr[:, -3:], inv_unq_box_idx, dim=0, reduce='mean') * 2.  # (N_modar, 3)
                # offset modar; here, assume objects maintain the same speed
                modar[unq_box_idx, :3] += boxes_offset
            modar = modar.cpu().numpy()
        else:
            # no cached prediction for this agent/sample
            modar = np.zeros((0, 9))
        return modar, glob_se3_lidar
def _get_lidar_token_of_present_agents(self, sample_token: str) -> Dict[int, str]:
out = dict()
if sample_token == '':
return out
sample = self.nusc.get('sample', sample_token)
for sensor_name, sensor_token in sample['data'].items():
if sensor_name not in self._lidars_name:
continue
lidar_id = int(sensor_name.split('_')[-1])
out[lidar_id] = sensor_token
return out
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.infos)
info = copy.deepcopy(self.infos[index])
gt_boxes, gt_names = info['gt_boxes'], info['gt_names']
# gt_boxes: (N_tot, 7)
# gt_names: (N_tot,)
ego_se3_glob = np.linalg.inv(get_nuscenes_sensor_pose_in_global(self.nusc, info['lidar_token'])) # (4, 4)
sample_token = info['token']
sample = self.nusc.get('sample', sample_token)
# get prediction of the ego vehicle @ now
exchange_boxes, exchange_metadata = dict(), dict()
exchange_boxes[1] = self._get_prediction_ego(sample_token)
exchange_metadata[1] = exchange_boxes[1].shape[0]
if self.dataset_cfg.EXCHANGE_SETTING == 'now':
dict_lidar_id_to_token = self._get_lidar_token_of_present_agents(sample_token)
_token_of_sample_of_interest = sample_token
elif self.dataset_cfg.EXCHANGE_SETTING == 'prev':
dict_lidar_id_to_token = self._get_lidar_token_of_present_agents(sample['prev'])
_token_of_sample_of_interest = sample['prev']
else:
raise NotImplementedError(f"EXCHANGE_SETTING := {self.dataset_cfg.EXCHANGE_SETTING} is unknown")
if len(dict_lidar_id_to_token) > 0:
for lidar_id, lidar_token in dict_lidar_id_to_token.items():
if lidar_id == 1:
# ego vehicle is already handled above
continue
modar, glob_se3_lidar = self._get_prediction_agent(lidar_id, lidar_token, _token_of_sample_of_interest, self.dataset_cfg.EXCHANGE_SETTING)
# transform modar to ego frame
ego_se3_lidar = ego_se3_glob @ glob_se3_lidar
modar[:, :7] = apply_se3_(ego_se3_lidar, boxes_=modar[:, :7], return_transformed=True)
# store agent's modar
exchange_boxes[lidar_id] = modar
exchange_metadata[lidar_id] = modar.shape[0]
# -----------------
# format output
# assemble datadict
input_dict = {
'points': np.zeros((1, 7)), # Dummy | (N_pts, 5 + 2) - point-5, sweep_idx, inst_idx
'gt_boxes': gt_boxes, # (N_inst, 7)
'gt_names': gt_names, # (N_inst,)
'frame_id': Path(info['lidar_path']).stem,
'metadata': {
'lidar_token': info['lidar_token'],
'num_sweeps_target': self.num_sweeps,
'sample_token': info['token'],
'lidar_id': 1,
'num_original': 0,
'exchange': exchange_metadata,
'exchange_boxes': exchange_boxes, # (N_boxes_tot, 7 + 2) - box-7, score, label
}
}
# data augmentation & other stuff
data_dict = self.prepare_data(data_dict=input_dict)
return data_dict
| quan-dao/practical-collab-perception | pcdet/datasets/v2x_sim/v2x_sim_dataset_ego_late.py | v2x_sim_dataset_ego_late.py | py | 6,411 | python | en | code | 5 | github-code | 36 |
42037688043 | """
Signal characteristics animation
"""
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib import gridspec
matplotlib.use('TkAgg')
class sigCharacterAnimation:
    '''
    Animation for signal characteristics: each signal is drawn as a point
    rotating on a circle (left) connected to its unrolled sine wave (right).
    '''
    # signal attributes
    am1, am2 = None, None  # signal amplitude, should <=1
    f1, f2 = None, None  # signal frequency, should >=1, multiple of 1
    phase1, phase2 = None, None  # signal phase, [0, 2pi)
    t = None
    sin1, cos1 = None, None  # signal 1
    sin2, cos2 = None, None  # signal 2
    # animation attributes
    fig, axes = None, None
    # BUG FIX: was `lines, point = None, None` — the code only ever uses
    # self.points (plural), and self.connect was used without being declared.
    lines, points = None, None
    connect = None  # red lines bridging the circle axes and the wave axes
    frames = None
    transFigure = None

    def __init__(self, signal1=None, signal2=None):
        """Each signal is [amplitude, frequency, phase]; defaults demo a
        frequency/phase difference between the two signals."""
        if signal1 is None:
            signal1 = [1, 1, 0]
        if signal2 is None:
            signal2 = [1, 2, np.pi / 2]
        self.am1, self.am2 = signal1[0], signal2[0]
        self.f1, self.f2 = signal1[1], signal2[1]
        self.phase1, self.phase2 = signal1[2], signal2[2]
        # generate one period of the slower signal, 64 samples per period
        min_freq = min(self.f1, self.f2)
        fs = 64 * min_freq
        self.t = np.arange(0, 1 / min_freq, 1 / fs)
        self.sin1 = self.am1 * np.sin(2 * np.pi * self.f1 * self.t + self.phase1)
        self.cos1 = self.am1 * np.cos(2 * np.pi * self.f1 * self.t + self.phase1)
        self.sin2 = self.am2 * np.sin(2 * np.pi * self.f2 * self.t + self.phase2)
        self.cos2 = self.am2 * np.cos(2 * np.pi * self.f2 * self.t + self.phase2)
        self.frames = len(self.t)

    def start(self, save_gif=False, f_name='correlation'):
        '''start animation or save as gif'''
        self._init_animation()
        anim = FuncAnimation(self.fig, self._animate, frames=self.frames, blit=False,
                             interval=100, init_func=self._init_canvas)
        if save_gif:
            if not os.path.exists('vis'):
                os.makedirs('vis')
            anim.save(os.path.join('vis', f_name + '.gif'), codec='png', writer='imagemagick')
        else:
            plt.show()

    def _init_animation(self):
        ''' initialize animation'''
        self.fig = plt.figure(figsize=(8, 6))
        self.axes = [matplotlib.axes.Axes] * 4
        gs = gridspec.GridSpec(2, 2, width_ratios=[1, 2])
        self.axes[0] = plt.subplot(gs[0, 0], aspect='equal')
        self.axes[1] = plt.subplot(gs[0, 1])
        self.axes[2] = plt.subplot(gs[1, 0], aspect='equal')
        self.axes[3] = plt.subplot(gs[1, 1])
        self.lines = [matplotlib.axes.Axes.plot] * 4
        self.points = [matplotlib.axes.Axes.plot] * 4
        self.connect = [matplotlib.lines.Line2D] * 2
        circ1 = plt.Circle((0, 0), self.am1, fill=False, linewidth=2, color='orange')
        circ2 = plt.Circle((0, 0), self.am2, fill=False, linewidth=2, color='green')
        self.axes[0].add_artist(circ1)
        self.axes[0].set_xlim(-1.1, 1.1)
        self.axes[0].set_ylim(-1.1, 1.1)
        self.axes[0].axis('off')
        self.axes[1].set_xlim(-0.1, 1)
        self.axes[1].set_ylim(-1.1, 1.1)
        self.axes[1].axis('off')
        self.axes[2].add_artist(circ2)
        self.axes[2].set_xlim(-1.1, 1.1)
        self.axes[2].set_ylim(-1.1, 1.1)
        self.axes[2].axis('off')
        self.axes[3].set_xlim(-0.1, 1)
        self.axes[3].set_ylim(-1.1, 1.1)
        self.axes[3].axis('off')
        self.lines[0], = self.axes[0].plot([], [], linewidth=2)
        self.lines[1], = self.axes[1].plot([], [], linewidth=2, color='orange')
        self.lines[2], = self.axes[2].plot([], [], linewidth=2)
        self.lines[3], = self.axes[3].plot([], [], linewidth=2, color='green')
        self.points[0], = self.axes[0].plot([], [], 'ro', markersize=6)
        self.points[1], = self.axes[1].plot([], [], 'ro', markersize=6)
        self.points[2], = self.axes[2].plot([], [], 'ro', markersize=6)
        self.points[3], = self.axes[3].plot([], [], 'ro', markersize=6)
        # connector lines live on the figure because they span two axes
        self.connect[0] = matplotlib.lines.Line2D([], [], color='r', transform=self.fig.transFigure)
        self.connect[1] = matplotlib.lines.Line2D([], [], color='r', transform=self.fig.transFigure)
        self.fig.lines.extend(self.connect)
        self.transFigure = self.fig.transFigure.inverted()

    def _init_canvas(self):
        '''do nothing, return artists to be re-drawn'''
        return self.lines + self.points + self.connect

    def _animate(self, i):
        '''perform animation step'''
        # update artists (negative index rotates backwards through one period)
        self.lines[0].set_data([0, self.cos1[-i]], [0, self.sin1[-i]])
        self.lines[1].set_data(self.t, np.roll(self.sin1, i))
        self.lines[2].set_data([0, self.cos2[-i]], [0, self.sin2[-i]])
        self.lines[3].set_data(self.t, np.roll(self.sin2, i))
        self.points[0].set_data([self.cos1[-i]], [self.sin1[-i]])
        self.points[1].set_data([0], [self.sin1[-i]])
        self.points[2].set_data([self.cos2[-i]], [self.sin2[-i]])
        self.points[3].set_data([0], [self.sin2[-i]])
        # map data coordinates of both endpoints into figure coordinates
        coord1 = self.transFigure.transform(self.axes[0].transData.transform([self.cos1[-i], self.sin1[-i]]))
        coord2 = self.transFigure.transform(self.axes[1].transData.transform([0, self.sin1[-i]]))
        self.connect[0].set_data((coord1[0], coord2[0]), (coord1[1], coord2[1]))
        coord1 = self.transFigure.transform(self.axes[2].transData.transform([self.cos2[-i], self.sin2[-i]]))
        coord2 = self.transFigure.transform(self.axes[3].transData.transform([0, self.sin2[-i]]))
        self.connect[1].set_data((coord1[0], coord2[0]), (coord1[1], coord2[1]))
        return self.lines + self.points + self.connect
if __name__ == '__main__':
    # Demo driver: uncomment one of the pairs below to render the amplitude
    # or frequency comparison; the phase comparison runs by default.
    save_fig = True
    # amplitude
    # anim = sigCharacterAnimation(signal1=[1, 1, 0], signal2=[0.5, 1, 0])
    # anim.start(save_gif=save_fig, f_name='sig_char_amplitude')
    # # frequency
    # anim = sigCharacterAnimation(signal1=[1, 1, 0], signal2=[1, 2, 0])
    # anim.start(save_gif=save_fig, f_name='sig_char_frequency')
    # phase
    anim = sigCharacterAnimation(signal1=[1, 1, 0], signal2=[1, 1, np.pi / 2])
    anim.start(save_gif=save_fig, f_name='sig_char_phase')
| PenroseWang/SimGPS | code/utils/signal_characteristic.py | signal_characteristic.py | py | 6,216 | python | en | code | 11 | github-code | 36 |
3206852190 | """ Provides the different Bounds that are used by the Table to determine
the Cells that are adjacent to the Table. """
from __future__ import annotations
from _operator import attrgetter
from itertools import cycle
from typing import Callable, cast, Iterable, NamedTuple, Protocol, TypeVar
from pdf2gtfs.config import Config
from pdf2gtfs.datastructures.pdftable.bbox import BBox
from pdf2gtfs.datastructures.table.direction import Direction, E, N, S, W
from pdf2gtfs.datastructures.table.cell import C, Cs
B = TypeVar("B", bound="Bounds")
class F(Protocol):
    """ Used as a type to typecheck min/max functions correctly. """
    # builtins.min/max cannot be typed precisely with Callable[...], so a
    # __call__ Protocol describes the (iterable, key=...) call form used here.
    def __call__(self, cells: Iterable[C] | Iterable[BBox],
                 key: Callable[[C | BBox], float]) -> C:
        pass
# Arguments used by the N-/S-/W-/EBounds.
# — func: The function (min / max) used to determine the correct limit.
# — direction: The Direction of the limit.
# Functional-style NamedTuple: instances are lightweight and immutable.
BoundArg = NamedTuple("BoundArg", [("func", F), ("direction", Direction)])
class Bounds:
    """ Basic Bounds, where not all limits necessarily exist. """
    # Direction of the bound (N/W/S/E); overridden by the subclasses below.
    d: Direction = None
    def __init__(self, n: float | None, w: float | None,
                 s: float | None, e: float | None) -> None:
        # Any limit may be None; hbox/vbox are only built when both of
        # their coordinates exist (see _update_hbox/_update_vbox).
        self._n = n
        self._w = w
        self._s = s
        self._e = e
        self._update_hbox()
        self._update_vbox()
    @classmethod
    def from_bboxes(cls, bboxes: list[BBox], *,
                    n: BoundArg | None = None, w: BoundArg | None = None,
                    s: BoundArg | None = None, e: BoundArg | None = None
                    ) -> B:
        """ Create a new Bounds from the BBoxes, based on which args are given.
        :param bboxes: The BBoxes used for construction.
        :param n: The northern BoundArg. None for NBounds.
        :param w: The western BoundArg. None for WBounds.
        :param s: The southern BoundArg. None for SBounds.
        :param e: The eastern BoundArg. None for EBounds.
        :return: A new Bounds created from the given BBoxes,
            based on which BoundArgs are provided.
        """
        return cls(n=get_limit_from_cells(bboxes, n),
                   w=get_limit_from_cells(bboxes, w),
                   s=get_limit_from_cells(bboxes, s),
                   e=get_limit_from_cells(bboxes, e))
    @classmethod
    def overlaps_any(cls, cells: Cs, c2: C) -> bool:
        """ Check if the given Cell overlaps with any of the given Cells.
        The overlap function is determined by cls.
        :param cells: Use these Cells to check overlap.
        :param c2: The Cell that is checked.
        :return: True if the Cell overlaps. False, otherwise.
        """
        func = getattr(c2.bbox, cls.d.o.overlap_func)
        for c1 in cells:
            # 0.8 is presumably the required overlap ratio accepted by the
            # BBox overlap function — confirm against BBox's implementation.
            if func(c1.bbox, 0.8):
                return True
        return False
    @classmethod
    def select_adjacent_cells(cls, border: list[BBox], cells: Cs) -> Cs:
        """ Select those Cells that are adjacent to the border BBoxes.
        :param border: The row/col of a Table that is used to determine
            if a Cell is adjacent to the Table.
        :param cells: The Cells that are checked for adjacency.
        :return: Those Cells, which are adjacent to the Table.
        """
        def get_all_adjacent_cells() -> Cs:
            """ Get Cells that fit only three bounds,
            but are overlapping with Cells that overlap all four.
            If we are extra_greedy, also get those cells that recursively
            overlap with cells that overlap with other cells.
            """
            # Need to shallow copy for overlap_cells to be different.
            all_cells = list(min_cells)
            overlap_cells = all_cells if Config.extra_greedy else min_cells
            # Fixed-point loop: keeps adding overlapping cells until no new
            # ones are found. When extra_greedy, overlap_cells aliases
            # all_cells, so later additions also seed further overlap checks.
            while True:
                new_cells = [c for c in cells
                             if cls.overlaps_any(overlap_cells, c)
                             and c not in all_cells]
                if not new_cells:
                    break
                all_cells += new_cells
            return all_cells
        # Get the three basic bounds, which are created from the border.
        bounds = cls.from_bboxes(border)
        cells = list(filter(bounds.within_bounds, cells))
        if not cells:
            return []
        bounds.update_missing_bound(cells)
        # These are the Cells that fit all bounds.
        min_cells = list(filter(bounds.within_bounds, cells))
        adjacent_cells = get_all_adjacent_cells()
        # Sort columns by y0 and rows by x0.
        lower_coord = attrgetter(f"bbox.{cls.d.o.normal.lower.coordinate}")
        return list(sorted(adjacent_cells, key=lower_coord))
    @property
    def n(self) -> float | None:
        """ The northern bound, i.e., y0/the lowest y coordinate. """
        return self._n
    @n.setter
    def n(self, value: float | None) -> None:
        self._n = value
        self._update_vbox()
    @property
    def s(self) -> float | None:
        """ The southern bound, i.e., y1/the largest y coordinate. """
        return self._s
    @s.setter
    def s(self, value: float | None) -> None:
        self._s = value
        self._update_vbox()
    @property
    def w(self) -> float | None:
        """ The western bound, i.e., x0/the lowest x coordinate. """
        return self._w
    @w.setter
    def w(self, value: float | None) -> None:
        self._w = value
        self._update_hbox()
    @property
    def e(self) -> float | None:
        """ The eastern bound, i.e., x1/the largest y coordinate. """
        return self._e
    @e.setter
    def e(self, value: float | None) -> None:
        self._e = value
        self._update_hbox()
    @property
    def hbox(self) -> BBox | None:
        """ The horizontal BBox, using only w/e. """
        return self._hbox
    @property
    def vbox(self) -> BBox | None:
        """ The vertical BBox, using only n/s. """
        return self._vbox
    def _update_hbox(self) -> None:
        # hbox exists only when both horizontal limits are known; the
        # vertical coordinates are set to the placeholder -1.
        if self.w is None or self.e is None:
            hbox = None
        else:
            hbox = BBox(self.w, -1, self.e, -1)
        self._hbox = hbox
    def _update_vbox(self) -> None:
        if self.n is None or self.s is None:
            vbox = None
        else:
            vbox = BBox(-1, self.n, -1, self.s)
        self._vbox = vbox
    def within_h_bounds(self, bbox: BBox) -> bool:
        """ Check if the given BBox is within the current Bounds, horizontally.
        :param bbox: The BBox that is checked.
        :return: True if the BBox is within Bounds. False, otherwise.
        """
        if self.hbox and self.hbox.is_h_overlap(bbox, 0.5):
            return True
        if self.hbox:
            return False
        # No hbox: fall back to checking whichever single limit exists.
        if self.w is not None and bbox.x1 <= self.w:
            return False
        if self.e is not None and bbox.x0 >= self.e:
            return False
        return True
    def within_v_bounds(self, bbox: BBox) -> bool:
        """ Check if the given BBox is within the current Bounds, vertically.
        :param bbox: The BBox that is checked.
        :return: True if the BBox is within Bounds. False, otherwise.
        """
        if self.vbox and self.vbox.is_v_overlap(bbox, 0.5):
            return True
        if self.vbox:
            return False
        if self.n is not None and bbox.y1 <= self.n:
            return False
        if self.s is not None and bbox.y0 >= self.s:
            return False
        return True
    def within_bounds(self, cell: C) -> bool:
        """ Check if the Cell is within the bounds.
        If the hbox/vbox is None, that is, if at least one of the w/e or n/s
        coordinates is None, the check will not fail immediately.
        Instead, in that case, only the existing (if any) coordinate will
        be checked.
        :param cell: The Cell that is checked.
        :return: True, if obj is within both the hbox and the vbox.
            False, otherwise.
        """
        bbox = cell.bbox
        return self.within_h_bounds(bbox) and self.within_v_bounds(bbox)
    def merge(self, bounds: B) -> None:
        """ Merge the Bounds, such that the resulting Bounds contain both.
        :param bounds: The Bounds that is merged into this one.
        """
        # n/w use min for lower bound, s/e use max for larger bound.
        # NOTE(review): if exactly one of the two Bounds has a None
        # coordinate, min/max will try to compare None with a float and
        # raise TypeError — confirm merge is only used on fully-defined
        # Bounds.
        for coordinate, func in zip("nswe", cycle((min, max))):
            getter = attrgetter(coordinate)
            value = func(map(getter, (self, bounds)), default=None)
            setattr(self, coordinate, value)
    def _update_single_limit(
            self, which: str, arg: BoundArg, cells: list[C]) -> None:
        """ Update a single bound using the BoundArg and the Cells.
        :param which: Can be one of "n", "w", "s", "e".
        :param arg: The BoundArg that is used to determine the limit.
        :param cells: The Cells used to calculate the limit.
        """
        setattr(self, which, get_limit_from_cells(cells, arg))
    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        fmt = "{: >7.2f}"
        n = fmt.format(self.n) if self.n is not None else "None"
        w = fmt.format(self.w) if self.w is not None else "None"
        s = fmt.format(self.s) if self.s is not None else "None"
        e = fmt.format(self.e) if self.e is not None else "None"
        return f"{cls_name}(n={n}, w={w}, s={s}, e={e})"
class WBounds(Bounds):
    """ The western outer bounds of a Table. Used when expanding a Table. """
    d = W
    @classmethod
    def from_bboxes(cls, bboxes: list[BBox], **_) -> WBounds:
        n = BoundArg(min, N)
        s = BoundArg(max, S)
        # We use the opposite Direction here, because we want the outer Bounds.
        e = BoundArg(min, E.opposite)
        return super().from_bboxes(bboxes, n=n, s=s, e=e)
    def update_missing_bound(self, cells: list[C]) -> None:
        """ Add the missing bound (western) based on the given Cells. """
        # max picks the right-most candidate, i.e. the tightest western limit.
        args: BoundArg = BoundArg(max, W)
        self._update_single_limit("w", args, cells)
class EBounds(Bounds):
    """ The eastern outer bounds of a Table. Used when expanding a Table. """
    d = E
    @classmethod
    def from_bboxes(cls, bboxes: list[BBox], **_) -> EBounds:
        n = BoundArg(min, N)
        s = BoundArg(max, S)
        # We use the opposite Direction here, because we want the outer Bounds.
        w = BoundArg(max, W.opposite)
        return super().from_bboxes(bboxes, n=n, w=w, s=s)
    def update_missing_bound(self, cells: list[C]) -> None:
        """ Add the missing bound (eastern) based on the given Cells. """
        # min picks the left-most candidate, i.e. the tightest eastern limit.
        args: BoundArg = BoundArg(min, E)
        self._update_single_limit("e", args, cells)
class NBounds(Bounds):
    """ The northern outer bounds of a Table. Used when expanding a Table. """
    d = N
    @classmethod
    def from_bboxes(cls, bboxes: list[BBox], **_) -> NBounds:
        w = BoundArg(min, W)
        # We use the opposite Direction here, because we want the outer Bounds.
        s = BoundArg(min, S.opposite)
        e = BoundArg(max, E)
        return super().from_bboxes(bboxes, w=w, s=s, e=e)
    def update_missing_bound(self, cells: list[C]) -> None:
        """ Add the missing bound (northern) based on the given Cells. """
        # max picks the lowest candidate, i.e. the tightest northern limit.
        args: BoundArg = BoundArg(max, N)
        self._update_single_limit("n", args, cells)
class SBounds(Bounds):
    """ The southern outer bounds of a Table. Used when expanding a Table. """
    d = S
    @classmethod
    def from_bboxes(cls, bboxes: list[BBox], **_) -> SBounds:
        # We use the opposite Direction here, because we want the outer Bounds.
        n = BoundArg(max, N.opposite)
        w = BoundArg(min, W)
        e = BoundArg(max, E)
        return super().from_bboxes(bboxes, n=n, w=w, e=e)
    def update_missing_bound(self, cells: list[C]) -> None:
        """ Add the missing bound (southern) based on the given Cells. """
        # min picks the highest candidate, i.e. the tightest southern limit.
        args: BoundArg = BoundArg(min, S)
        self._update_single_limit("s", args, cells)
def get_limit_from_cells(objects: list[C] | list[BBox], arg: BoundArg | None
                         ) -> float | None:
    """ Calculate a limit from the Cells using the provided func and attr.
    :param objects: The Cells/BBoxes used to calculate the limit.
    :param arg: The BoundArg used to determine the limit.
    :return: The limit of the Cells, based on the given func and d.
    """
    if not arg or not objects:
        return None
    # Cells wrap their BBox in a `bbox` attribute; plain BBoxes do not.
    coordinate = arg.direction.coordinate
    if hasattr(objects[0], "bbox"):
        coordinate = "bbox." + coordinate
    extract = attrgetter(coordinate)
    # Apply min/max over the objects, then read the coordinate off the winner.
    extreme = arg.func(objects, key=extract)
    return cast(float, extract(extreme))
def select_adjacent_cells(d: Direction, bboxes: list[BBox], cells: Cs) -> Cs:
    """ Get all Cells adjacent in d to the given reference Cells.
    :param d: The Direction to check for adjacency in.
    :param bboxes: The BBoxes used to check for adjacency.
    :param cells: The Cells that are checked for adjacency.
    :return: The Cells that are adjacent to ref_cells.
    """
    bound_cls = {N: NBounds, W: WBounds, S: SBounds, E: EBounds}[d]
    adjacent_cells: Cs = bound_cls.select_adjacent_cells(bboxes, cells)
    normal = d.o.normal
    # Remove Cells that are not overlapping with any reference Cell.
    # starter_id resumes the scan where the last match was found — this
    # assumes both sequences are sorted along the normal axis.
    starter_id = 0
    for adj_cell in adjacent_cells:
        for i, bbox in enumerate(bboxes[starter_id:], starter_id):
            if adj_cell.bbox.is_overlap(normal.name, bbox):
                break
        else:
            # NOTE(review): this mutates adjacent_cells while iterating it
            # and stops after removing a single non-overlapping Cell —
            # confirm that at most one such Cell can occur here.
            adjacent_cells.remove(adj_cell)
            break
        starter_id = i
    return adjacent_cells
| heijul/pdf2gtfs | src/pdf2gtfs/datastructures/table/bounds.py | bounds.py | py | 13,748 | python | en | code | 1 | github-code | 36 |
8639831743 | SHAPE = "shapeBean"
# Names of the Virtual Satellite visualisation bean properties
# (SHAPE above completes this group).
COLOR = "colorBean"
SIZE_X = "sizeXBean"
SIZE_Y = "sizeYBean"
SIZE_Z = "sizeZBean"
RADIUS = "radiusBean"
POSITION_X = "positionXBean"
POSITION_Y = "positionYBean"
POSITION_Z = "positionZBean"
ROTATION_X = "rotationXBean"
ROTATION_Y = "rotationYBean"
ROTATION_Z = "rotationZBean"
GEOMETRY = "geometryFileBean"
# Bean attributes
VALUE = "value"
UUID = "uuid"
OVERRIDE = "override"
NAME = "name"
TYPE = "type"
CHILDREN = "children"
# Fully qualified type identifier of the visualisation concept element.
TYPE_VIS = 'de.dlr.sc.virsat.model.extension.visualisation.Visualisation'
| virtualsatellite/VirtualSatellite4-FreeCAD-mod | VirtualSatelliteCAD/plugins/VirtualSatelliteRestPlugin/virsat_constants.py | virsat_constants.py | py | 525 | python | en | code | 9 | github-code | 36 |
43008955586 | import os
import sys
from sqlobject.compat import load_module_from_file
def load_module(module_name):
    """Import ``module_name`` and return the (possibly nested) module.

    ``__import__`` returns the top-level package for a dotted name, so
    walk the remaining path components to reach the actual submodule.
    """
    module = __import__(module_name)
    for part in module_name.split('.')[1:]:
        module = getattr(module, part)
    return module
def load_module_from_name(filename, module_name):
    """Load the module at ``filename`` under the dotted ``module_name``.

    Creates a missing ``__init__.py`` next to ``filename`` so its directory
    is importable as a package, and recursively materializes the parent
    packages before loading the module itself.

    :param filename: Path of the module's source file.
    :param module_name: Fully dotted name to register the module under.
    :return: The loaded module object.
    :raises IOError: If ``__init__.py`` cannot be created.
    """
    if module_name in sys.modules:
        return sys.modules[module_name]
    init_filename = os.path.join(os.path.dirname(filename), '__init__.py')
    if not os.path.exists(init_filename):
        try:
            # Context manager guarantees the handle is closed even when the
            # write fails (the previous bare open/write/close leaked it).
            with open(init_filename, 'w') as f:
                f.write('#\n')
        except (OSError, IOError) as e:
            raise IOError(
                'Cannot write __init__.py file into directory %s (%s)\n'
                % (os.path.dirname(filename), e))
    # Creating/importing the parent package may have registered us already.
    if module_name in sys.modules:
        return sys.modules[module_name]
    if '.' in module_name:
        # Ensure the parent package exists before loading the leaf module.
        parent_name, base_name = module_name.rsplit('.', 1)
        load_module_from_name(os.path.dirname(filename), parent_name)
    else:
        base_name = module_name
    return load_module_from_file(base_name, module_name, filename)
| sqlobject/sqlobject | sqlobject/util/moduleloader.py | moduleloader.py | py | 1,175 | python | en | code | 140 | github-code | 36 |
20078827999 | from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from api.serializers import BookSerializer
from books.models import Book, PublicationLanguage, Author
from django.test import Client
client = Client()
class TestBookList(APITestCase):
    """Integration tests for the ``api:book_list`` endpoint."""
    def setUp(self) -> None:
        # get_or_create returns (instance, created); keep only the instance.
        self.publication_language_id = PublicationLanguage.objects.get_or_create(
            language="en")[0]
        self.author_id = Author.objects.get_or_create(name="test_author")[0]
    def _create_book(self, isbn, title, publication_year, page_count=2008):
        """Create a Book with shared test defaults and attach the author.
        Extracted to remove the copy-pasted creation boilerplate from the
        individual tests.
        """
        book = Book.objects.create(
            isbn=isbn,
            title=title,
            publication_year=publication_year,
            page_count=page_count,
            cover="https://google.pl",
            publication_language=self.publication_language_id)
        book.author.set([self.author_id])
        return book
    def _assert_response_matches(self, response, books):
        """Assert the endpoint returned HTTP 200 and serialized ``books``."""
        serializer = BookSerializer(books, many=True)
        self.assertEqual(response.data, serializer.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_empty_list_books(self):
        response = client.get(reverse("api:book_list"))
        self._assert_response_matches(response, Book.objects.all())
    def test_all_books(self):
        self._create_book("1234567891234", "test_title", 2008, page_count=1000)
        response = client.get(reverse("api:book_list"))
        self._assert_response_matches(response, Book.objects.all())
    def test_filter_publication_date(self):
        self._create_book("1234567891234", "test_title", 2008)
        self._create_book("1234567891235", "test_title123", 2018)
        url = f"{reverse('api:book_list')}?publication_year__gte=&publication_year__lte=2013"
        response = client.get(url)
        self._assert_response_matches(
            response, Book.objects.filter(publication_year__lte=2013))
| tomasz-rzesikowski/books_poc | api/tests/tests_views.py | tests_views.py | py | 2,504 | python | en | code | 0 | github-code | 36 |
33133914412 | # Google Question
# Given an array = [2, 5, 1, 2, 3, 5, 1, 2, 4]
# It should return 2
# Given an array = [2, 1, 1, 2, 3, 5, 1, 2, 4]
# It should return 1
# Given an array = [2, 3, 4, 5]
# It should return undefined
# input:
# array - always an array of integers
# negative and positive
# no size limit
# can be empty or None
# output:
# integer
# find the first recurring element
# if not -> None
# [1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8, 7, 6, 5, 4, 3, 2, 1]
def first_recurring_character(arr):
    """Return the first value whose repeat occurrence appears earliest.

    Brute force: O(n^2) time, O(1) extra space. For each position j, check
    whether arr[j] already appeared among arr[:j]; the first such j wins.

    Fixes two bugs in the previous version: it returned the pair with the
    smallest index *distance* (wrong element for e.g. [1, 2, 3, 1, 3]),
    and its strict `<` comparison against len(arr) - 1 missed duplicates
    spanning the whole array (e.g. [1, 2, 1] returned None).
    """
    for j in range(1, len(arr)):
        if arr[j] in arr[:j]:
            return arr[j]
    return None
# Demo: prints 1 — the first value seen twice while scanning left to right.
print(first_recurring_character([2, 1, 1, 2, 3, 5, 1, 2, 4]))
# O(n) - Time Complexity
# O(n) - Space Complexity
def first_recurring_character2(arr):
    """Return the first element already seen while scanning, else None.
    Single pass with a set of seen values: O(n) time, O(n) space.
    """
    seen = set()
    for item in arr:
        if item in seen:
            return item
        seen.add(item)
    return None
# print(first_recurring_character2([1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8, 7, 6, 5, 4, 3, 2, 1]))
| Iuri-Almeida/ZTM-Data-Structures-and-Algorithms | data-structures/hash-tables/first_recurring_character.py | first_recurring_character.py | py | 1,238 | python | en | code | 0 | github-code | 36 |
73339180903 | import asyncio
import json
from datetime import datetime
import aiohttp
from pydantic import BaseModel, Field, NonNegativeFloat
from faststream import ContextRepo, FastStream, Logger
from faststream.kafka import KafkaBroker
broker = KafkaBroker("localhost:9092")
app = FastStream(broker)
class CryptoPrice(BaseModel):
    """Message schema for one cryptocurrency price tick published to Kafka."""
    price: NonNegativeFloat = Field(
        ..., examples=[50000.0], description="Current price of cryptocurrency in USD"
    )
    crypto_currency: str = Field(
        ..., examples=["BTC"], description="The cryptocurrency"
    )
# Kafka publisher for the topic that downstream price consumers subscribe to.
publisher = broker.publisher("new_crypto_price")
async def fetch_crypto_price(
    url: str, crypto_currency: str, logger: Logger, context: ContextRepo, time_interval: int = 2
) -> None:
    """Poll ``url`` for the current spot price and publish each tick to Kafka.

    Loops until the global ``app_is_running`` flag is cleared at shutdown,
    sleeping ``time_interval`` seconds between polls. Non-200 responses are
    logged as warnings and skipped; the loop keeps running.
    """
    # Always use context: ContextRepo for storing app_is_running variable
    while context.get("app_is_running"):
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                if response.status == 200:
                    data = await response.json()
                    # Coinbase spot-price payload shape: {"data": {"amount": ...}}
                    price = data["data"]["amount"]
                    new_crypto_price = CryptoPrice(
                        price=price, crypto_currency=crypto_currency
                    )
                    # Key by currency so all ticks of one coin land in one partition.
                    await publisher.publish(
                        new_crypto_price,
                        key=crypto_currency.encode("utf-8"),
                    )
                else:
                    logger.warning(
                        f"Failed API request {url} at time {datetime.now()}"
                    )
        await asyncio.sleep(time_interval)
@app.on_startup
async def app_setup(context: ContextRepo):
    """Set the global run flag so the polling loops (fetch_crypto_price) run."""
    context.set_global("app_is_running", True)
@app.on_shutdown
async def shutdown(context: ContextRepo):
    """Clear the run flag, then wait for every polling task to finish."""
    context.set_global("app_is_running", False)
    # Get all the running tasks and wait them to finish
    fetch_tasks = context.get("fetch_tasks")
    await asyncio.gather(*fetch_tasks)
@app.after_startup
async def publish_crypto_price(logger: Logger, context: ContextRepo):
logger.info("Starting publishing:")
cryptocurrencies = [("Bitcoin", "BTC"), ("Ethereum", "ETH")]
fetch_tasks = [
asyncio.create_task(
fetch_crypto_price(
f"https://api.coinbase.com/v2/prices/{crypto_currency}-USD/spot",
crypto_currency,
logger,
context,
)
)
for _, crypto_currency in cryptocurrencies
]
# you need to save asyncio tasks so you can wait them to finish at app shutdown (the function with @app.on_shutdown function)
context.set_global("fetch_tasks", fetch_tasks) | airtai/faststream-gen | docs_src/tutorial/retrieve-publish-crypto/app/application.py | application.py | py | 2,687 | python | en | code | 19 | github-code | 36 |
4705152598 | import numpy as np
import pandas as pd
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
try:
from sklearn.base import TransformerMixin, BaseEstimator
except ImportError:
msg = "scikit-learn not installed"
logger.warning(msg)
try:
from fancyimpute import IterativeImputer, SoftImpute
except ImportError:
msg = "fancyimpute not installed"
logger.warning(msg)
class MultipleImputer(BaseEstimator, TransformerMixin):
    """
    Multiple Imputation via fancyimpute.IterativeImputer.

    Fits ``multiple`` independent IterativeImputer instances (each with a
    different random seed) and averages their outputs on transform. When
    ``groupby`` (or ``y`` at fit time) is given, a separate ensemble is
    fitted per class/group.
    """

    def __init__(self, multiple=5, n_iter=10, groupby=None, *args, **kwargs):
        self.multiple = multiple  # number of imputers averaged per group
        self.n_iter = n_iter  # iterations per IterativeImputer
        self.args = args
        self.kwargs = kwargs
        self.groupby = groupby  # optional column name used to split groups

    def transform(self, X, *args, **kwargs):
        """Return a DataFrame with missing values replaced by the mean of
        the fitted imputers' outputs.

        Columns that are entirely null are left untouched (stay all-NaN).
        """
        assert isinstance(X, pd.DataFrame)
        df = pd.DataFrame(columns=X.columns, index=X.index)
        if isinstance(self.imputers, dict):
            for c, d in self.imputers.items():
                mask = d["mask"]
                imputers = d["impute"]
                # BUG FIX: X[mask, :] is invalid DataFrame indexing (tuple
                # key raises at runtime); label-based .loc is required.
                imputed_data = np.array(
                    [imp.transform(X.loc[mask, :]) for imp in imputers]
                )
                mean = np.mean(imputed_data, axis=0)
                df.loc[mask, ~pd.isnull(X.loc[mask, :]).all(axis=0)] = mean
            return df
        else:
            imputed_data = np.array([imp.transform(X) for imp in self.imputers])
            mean = np.mean(imputed_data, axis=0)
            df.loc[:, ~pd.isnull(X).all(axis=0)] = mean
            return df

    """
    def inverse_transform(self, Y, *args, **kwargs):
        # For non-compositional data, take the mask and reverting to nan
        # for compositional data, renormalisation would be needed
        pass
    """

    def fit(self, X, y=None):
        """Fit the imputer ensembles, optionally one ensemble per group.

        :param X: DataFrame to fit on.
        :param y: Optional class labels; mutually exclusive with ``groupby``.
        """
        assert isinstance(X, pd.DataFrame)
        y_present = y is not None
        groupby_present = self.groupby is not None
        self.imputers = []
        if y_present or groupby_present:
            assert not (groupby_present and y_present)
            if y_present:
                classes = np.unique(y)
                gen_mask = lambda c: y == c
            if groupby_present:
                classes = X[self.groupby].unique()
                gen_mask = lambda c: X[self.groupby] == c
            self.imputers = {
                c: {
                    "impute": [
                        IterativeImputer(
                            n_iter=self.n_iter,
                            sample_posterior=True,
                            random_state=ix,
                            **self.kwargs
                        )
                        for ix in range(self.multiple)
                    ],
                    "mask": gen_mask(c),
                }
                for c in classes
            }
            msg = """Imputation transformer: {} imputers x {} classes""".format(
                self.multiple, len(classes)
            )
            logger.info(msg)
            for c, d in self.imputers.items():
                for imp in d["impute"]:
                    # BUG FIX: X[d["mask"], :] -> X.loc[d["mask"], :]
                    imp.fit(X.loc[d["mask"], :])
        else:
            for ix in range(self.multiple):
                self.imputers.append(
                    IterativeImputer(
                        n_iter=self.n_iter,
                        sample_posterior=True,
                        random_state=ix,
                        **self.kwargs
                    )
                )
            msg = """Imputation transformer: {} imputers""".format(self.multiple)
            logger.info(msg)
            for ix in range(self.multiple):
                self.imputers[ix].fit(X)
        return self
class PdSoftImputer(BaseEstimator, TransformerMixin):
    """
    Multiple Imputation via fancyimpute.SoftImpute.
    """
    def __init__(self, max_iters=100, groupby=None, donotimpute=[], *args, **kwargs):
        # NOTE(review): mutable default for `donotimpute` is shared between
        # instances — safe only while it is never mutated in place; confirm.
        self.args = args
        self.kwargs = kwargs
        self.max_iters = max_iters  # iteration cap passed to SoftImpute
        self.groupby = groupby  # optional column name used to split groups
        self.donotimpute = donotimpute  # columns copied through unchanged
    def transform(self, X, *args, **kwargs):
        """
        Impute Missing Values
        Todo
        ------
        * Need to use masks to avoid :class:`fancyimpute.SoftImpute` returning 0. where it cannot impute.
        """
        assert isinstance(X, pd.DataFrame)
        df = pd.DataFrame(columns=X.columns, index=X.index)  # df of nans
        df.loc[:, self.donotimpute] = X.loc[:, self.donotimpute]
        to_impute = [i for i in X.columns if not i in self.donotimpute]
        # Rows with at least one observed value among the imputable columns.
        imputable = ~pd.isnull(X.loc[:, to_impute]).all(axis=1)
        if isinstance(self.imputer, dict):
            for c, d in self.imputer.items():
                mask = d["mask"]
                mask = mask & imputable
                imputer = d["impute"]
                # NOTE(review): fit_transform (not transform) is called on
                # every transform — SoftImpute refits per call; verify this
                # is intended rather than reusing the fit-time state.
                imputed_data = imputer.fit_transform(X.loc[mask, to_impute])
                assert imputed_data.shape[0] == X.loc[mask, :].index.size
                df.loc[mask, to_impute] = imputed_data
            return df
        else:
            imputed_data = self.imputer.fit_transform(X.loc[imputable, to_impute])
            assert imputed_data.shape[0] == X.loc[imputable, :].index.size
            df.loc[imputable, to_impute] = imputed_data
            return df
    """
    def inverse_transform(self, Y, *args, **kwargs):
        # For non-compositional data, take the mask and reverting to nan
        # for compositional data, renormalisation would be needed
        pass
    """
    def fit(self, X, y=None):
        """Build the SoftImpute instance(s), one per class/group if either
        ``y`` or ``groupby`` is supplied (they are mutually exclusive)."""
        assert isinstance(X, pd.DataFrame)
        start = X
        y_present = y is not None
        groupby_present = self.groupby is not None
        self.imputer = []
        if y_present or groupby_present:
            assert not (groupby_present and y_present)
            if y_present:
                classes = np.unique(y)
                gen_mask = lambda c: y == c
            if groupby_present:
                classes = X[self.groupby].unique()
                gen_mask = lambda c: X[self.groupby] == c
            self.imputer = {
                c: {
                    "impute": SoftImpute(max_iters=self.max_iters, **self.kwargs),
                    "mask": gen_mask(c),
                }
                for c in classes
            }
            msg = """Building Soft Imputation Transformers for {} classes""".format(
                len(classes)
            )
            logger.info(msg)
        else:
            self.imputer = SoftImpute(max_iters=self.max_iters, **self.kwargs)
            msg = """Building Soft Imputation Transformer"""
            logger.info(msg)
        return self
| skerryvore/pyrolite | pyrolite/util/skl/impute.py | impute.py | py | 6,714 | python | en | code | null | github-code | 36 |
32413884802 | import pytest
import torch
from renate.benchmark.models.transformer import HuggingFaceSequenceClassificationTransformer
@pytest.mark.parametrize("model_name", ["distilbert-base-uncased", "bert-base-uncased"])
def test_init(model_name):
    """Smoke test: the wrapper constructs without raising for both backbones."""
    kwargs = {"pretrained_model_name_or_path": model_name, "num_outputs": 10}
    HuggingFaceSequenceClassificationTransformer(**kwargs)
@pytest.mark.parametrize(
    "model_name,input_dim",
    [
        ["distilbert-base-uncased", (128,)],
        ["bert-base-uncased", (256,)],
    ],
)
def test_text_transformer_fwd(model_name, input_dim):
    """Forward pass on token ids yields logits of shape (batch, num_outputs)."""
    # BUG FIX: num_outputs was omitted from the constructor although the
    # assertions below expect 10 output logits; pass it explicitly,
    # consistent with test_init above.
    transformer = HuggingFaceSequenceClassificationTransformer(
        pretrained_model_name_or_path=model_name, num_outputs=10
    )
    x = {"input_ids": torch.randint(0, 30000, (5, *input_dim))}
    y_hat = transformer(x)
    assert y_hat.shape[0] == 5
    assert y_hat.shape[1] == 10
| awslabs/Renate | test/renate/benchmark/models/test_text_transformer.py | test_text_transformer.py | py | 844 | python | en | code | 251 | github-code | 36 |
71656889065 | import torch
import torchaudio
from torchaudio.transforms import MelSpectrogram, Spectrogram
def load_wav_to_torch(full_path, hop_size=0, slice_train=False):
    """Load a waveform with torchaudio, optionally padded to a hop multiple.

    Parameters
    ----------
    full_path : str
        Path to the audio file.
    hop_size : int
        STFT hop size; when > 0 and ``slice_train`` is False, the waveform is
        right-padded with zeros to the next multiple of ``hop_size``.
    slice_train : bool
        When True, skip padding (training slices are cut elsewhere).

    Returns
    -------
    (torch.Tensor, int)
        1-D waveform in [-1, 1] and its sampling rate.
    """
    wav, sampling_rate = torchaudio.load(full_path, normalize=True)
    # BUG FIX: the default hop_size=0 previously hit a ZeroDivisionError on
    # the padding path; only pad when a positive hop size is supplied.
    if not slice_train and hop_size > 0:
        # Pad up to the next hop boundary (always at least 1 sample, at most
        # hop_size), so frame counts line up with downstream STFT frames.
        p = (wav.shape[-1] // hop_size + 1) * hop_size - wav.shape[-1]
        wav = torch.nn.functional.pad(wav, (0, p), mode="constant").data
    return wav.squeeze(0), sampling_rate
class SpectrogramFixed(torch.nn.Module):
    """Torchaudio Spectrogram with the trailing padding frame removed.

    Wraps :class:`torchaudio.transforms.Spectrogram` and drops the final
    time frame that torchaudio's padding introduces. NOTE(review): the
    original docstring claimed a log10 scale was added, but no log is
    applied here — compare MelSpectrogramFixed below.
    """

    def __init__(self, **kwargs):
        # kwargs are forwarded verbatim to torchaudio.transforms.Spectrogram
        # (n_fft, win_length, hop_length, window_fn, ...).
        super(SpectrogramFixed, self).__init__()
        self.torchaudio_backend = Spectrogram(**kwargs)

    def forward(self, x):
        # Compute the spectrogram, then drop the last frame on the time axis.
        outputs = self.torchaudio_backend(x)
        return outputs[..., :-1]
class MelSpectrogramFixed(torch.nn.Module):
    """Log mel-spectrogram with the trailing padding frame removed.

    Wraps :class:`torchaudio.transforms.MelSpectrogram`, applies a natural
    log (``torch.log``, not log10 as the original docstring claimed) with a
    0.001 floor to avoid log(0), and drops the final time frame that
    torchaudio's padding introduces.
    """

    def __init__(self, **kwargs):
        # kwargs are forwarded verbatim to torchaudio.transforms.MelSpectrogram
        # (sample_rate, n_fft, hop_length, f_min, f_max, n_mels, ...).
        super(MelSpectrogramFixed, self).__init__()
        self.torchaudio_backend = MelSpectrogram(**kwargs)

    def forward(self, x):
        # Natural log with an additive epsilon; last time frame is dropped.
        outputs = torch.log(self.torchaudio_backend(x) + 0.001)
        return outputs[..., :-1]
def torch_wav2spec(wav_fn, fft_size, hop_size, win_length, num_mels, fmin, fmax, sample_rate):
    """Convert an audio file into waveform, linear- and mel-spectrogram arrays.

    Returns a dict with keys "wav" (1-D float array, truncated so that
    ``len(wav) == n_frames * hop_size``), "linear" (frames x bins) and
    "mel" (frames x num_mels), all as numpy arrays.
    """
    # Load the waveform (padded to a hop multiple) and validate its rate.
    wav, sr = load_wav_to_torch(wav_fn, hop_size, slice_train=False)
    if sr != sample_rate:
        raise ValueError(f"{sr} SR doesn't match target {sample_rate} SR")

    # Warn (without failing) if the signal exceeds the nominal [-1, 1] range.
    if torch.min(wav) < -1.:
        print('min value is ', torch.min(wav))
    if torch.max(wav) > 1.:
        print('max value is ', torch.max(wav))

    device = wav.device
    spec_extractor = SpectrogramFixed(
        n_fft=fft_size,
        win_length=win_length,
        hop_length=hop_size,
        window_fn=torch.hann_window,
    ).to(device=device)
    mel_extractor = MelSpectrogramFixed(
        sample_rate=sample_rate,
        n_fft=fft_size,
        win_length=win_length,
        hop_length=hop_size,
        f_min=fmin,
        f_max=fmax,
        n_mels=num_mels,
        window_fn=torch.hann_window,
    ).to(device=device)

    spec = spec_extractor(wav)
    mel = mel_extractor(wav)

    # Trim the waveform so its sample count matches the mel frame count.
    wav = wav.squeeze(0)[:mel.shape[-1] * hop_size]

    # Sanity checks: frame counts must agree across all representations.
    assert wav.shape[-1] == mel.shape[-1] * hop_size, f"| wav: {wav.shape}, spec: {spec.shape}, mel: {mel.shape}"
    assert mel.shape[-1] == spec.shape[-1], f"| wav: {wav.shape}, spec: {spec.shape}, mel: {mel.shape}"

    return {
        "wav": wav.cpu().detach().numpy(),
        "linear": spec.squeeze(0).T.cpu().detach().numpy(),
        "mel": mel.squeeze(0).T.cpu().detach().numpy(),
    }
| jisang93/VISinger | utils/audio/mel_processing.py | mel_processing.py | py | 2,724 | python | en | code | 13 | github-code | 36 |
"""Watch /var/log and /etc for filesystem changes and append them to a log file.

BUG FIXES versus the original script:
* ``os.system("while :; do python3 File-Changes.py; done")`` ran an infinite
  shell loop *before* any watch was registered; os.system blocks until the
  command exits, so everything below it was unreachable (and the loop forked
  this script recursively). Replaced by an in-process ``while True`` loop
  around ``inotify.read()``, which was the stated intent.
* ``fOpen.write(event)`` raised TypeError — inotify events are namedtuples,
  not strings. Events and their decoded flags are now formatted before
  being written.
"""
import os  # retained from the original file; no longer used after the os.system restart hack was removed
from inotify_simple import INotify, flags

LOG_FILE = "directorylogs.txt"

inotify = INotify()

# Watch for file creation, deletion, modification and self-deletion.
watch_flags = flags.CREATE | flags.DELETE | flags.MODIFY | flags.DELETE_SELF

# Register watches on the directories of interest.
inotify.add_watch("/var/log", watch_flags)
inotify.add_watch("/etc", watch_flags)

# Keep reading events forever; read() blocks until at least one arrives.
while True:
    for event in inotify.read():
        # Decode the event mask into human-readable flag names.
        event_flags = " ".join(str(flag) for flag in flags.from_mask(event.mask))
        with open(LOG_FILE, "a") as log:
            log.write(f"{event} {event_flags}\n")
| Splixxy/Cron-Job | File-Changes.py | File-Changes.py | py | 907 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.