seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
11357000023 | import pygame
class Heart(pygame.sprite.Sprite):
    """Player-controlled heart (soul) sprite with hit points, movement speed
    and an invincibility timer used after taking a hit."""

    def __init__(self, imgfile, hp, pos=None):
        """
        imgfile: path of the heart image to load
        hp:      initial hit points
        pos:     optional starting position (defaults to a fresh empty list)
        """
        super().__init__()
        self.heart_img = pygame.image.load(imgfile)
        self.rect = self.heart_img.get_rect()
        # BUGFIX: the old default `pos=[]` was a shared mutable default —
        # every instance created without `pos` aliased the same list.
        self.pos = pos if pos is not None else []
        self.HP = hp
        self.speed = 7
        self.Invincible = 0  # invincibility time remaining after being hit
        # TODO: breaking-apart death effect still to be implemented

    def is_dead(self):
        """Return True when HP is exhausted."""
        return self.HP <= 0

    def target_pos(self, pos):
        """Return pos shifted so the sprite is drawn centred on it."""
        # rect[-2] / rect[-1] are the rect's width and height
        return (pos[0] - self.rect[-2] // 2, pos[1] - self.rect[-1] // 2)
| boaoqian/undertale_fight_system | Heart.py | Heart.py | py | 662 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pygame.sprite",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 7,
"usage_type": "attribute"
}
] |
21797734599 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Conjunto de funciones desarrolladas para ser utilizadas en los programas principales
'''
import numpy as np
import torch
import matplotlib.pyplot as plt
from labcomdig import gray2de
from network import *
'''
NUMPY
'''
def transmisorpamV2(Bn, Eb, M, p, L):
    """
    [Xn,Bn,An,phi,alfabetopam] = transmisorpamV2(Bn,Eb,M,p,L)

    Baseband/passband M-PAM transmitter (NumPy version).

    Inputs:
        Bn = binary digit sequence to transmit
        Eb = mean transmitted energy per bit (Joules), must be > 0
        M  = number of PAM symbols (rounded up to a power of two)
        p  = low-pass or band-pass pulse shape
        L  = samples used to represent one symbol
    Returns:
        Xn  = discrete transmitted signal
        Bn  = binary digits actually transmitted (possibly zero-padded)
        An  = transmitted amplitude levels
        phi = unit-energy basis pulse
        alfabetopam = amplitude level associated with every symbol
    """
    p = np.reshape(p, [np.size(p)])  # force the pulse to 1-D
    if len(Bn) < 1 or Eb <= 0 or M < 2 or np.dot(p, p) == 0 or L < 1:
        raise Exception('Error: revise los parámetros de entrada')
    # How many bits per symbol? Round M up to a power of two.
    bits_per_symbol = int(np.ceil(np.log2(M)))
    M = 2 ** bits_per_symbol
    # Amplitude alphabet [eq. (4.21)]: levels with mean energy Eb per bit
    alfabetopam = np.sqrt(3 * Eb * np.log2(M) / (M ** 2 - 1)) * (2 * np.arange(M) - M + 1)
    # Zero-pad Bn so its length is a multiple of bits_per_symbol
    n_bits = len(Bn)
    Bn = Bn.squeeze().astype(int)
    pad = int(bits_per_symbol * np.ceil(n_bits / bits_per_symbol) - n_bits)
    Bn = np.r_[Bn, np.zeros(pad).astype(int)]
    n_bits = len(Bn)
    n_syms = n_bits // bits_per_symbol
    # Map (Gray-coded) bit groups onto amplitude levels
    if M > 2:
        An = alfabetopam[gray2de(np.reshape(Bn, [n_syms, bits_per_symbol]))]
    else:
        An = alfabetopam[Bn]
    # Pad or truncate the supplied pulse so it spans exactly L samples
    pulse_len = len(p)
    if pulse_len < L:
        p = np.r_[p, np.zeros(L - pulse_len)]
    elif pulse_len > L:
        print('La duración del pulso se trunca a {} muestras'.format(str(L)))
        p = p[:L]  # must change if a pulse longer than L samples is wanted
    # Normalise the pulse energy to obtain the system basis
    phi = p / np.sqrt(p @ p)
    # Pulse train: one scaled copy of phi per transmitted symbol
    Xn = np.kron(An, phi)
    return [Xn, Bn, An, phi, alfabetopam]
def de2Nary(d, n, N):
    """
    b = de2Nary(d, n, N)
    Convert the decimal numbers in d into length-n digit vectors in base N
    (most significant digit first, one column per input value).
    """
    digits = np.zeros([len(d), int(n)])
    for col in range(int(n)):
        # quotient feeds the next iteration, remainder is the current digit
        d, digits[:, col] = np.divmod(d, N)
    # columns were produced LSB-first; flip so the MSB comes first
    return np.fliplr(digits).astype(int).T
'''
TORCH
'''
def t_Qfunct(x):
    """
    y = Qfunct(x): Gaussian tail probability
    y = 1/sqrt(2*pi) * integral from x to inf of exp(-t^2/2) dt,
    evaluated through the complementary error function.
    """
    root2 = torch.tensor(2) ** 0.5
    return 0.5 * torch.special.erfc(x / root2)
def t_gray2de(b):
    """
    Convert each row of the binary matrix b (Gray coded, MSB first) into its
    decimal value; returns a 1-D tensor with one value per row.

    Raises Exception if b is not a tensor.
    """
    if not torch.is_tensor(b):
        raise Exception('Error: la entrada no es un tensor')
    b = b.long()  # ensure integer dtype for XOR / matmul
    # Undo the Gray code: c[:,i] = c[:,i-1] XOR b[:,i]
    c = torch.zeros_like(b)
    c[:, 0] = b[:, 0]
    for i in range(1, b.size(dim=1)):
        c[:, i] = torch.logical_xor(c[:, i - 1], b[:, i])
    # least significant bit first for the weighted sum below
    c = torch.fliplr(c)
    # Degenerate (empty) input.
    [n, m] = c.size()
    if torch.min(torch.tensor((m, n))) < 1:
        d = []
        # BUGFIX: previously a bare `return` returned None although the
        # docstring promises d; return the empty result explicitly.
        return d
    d = c @ 2 ** torch.arange(m)
    return d
def t_de2gray(d,n):
    """
    b = de2gray(d, n)
    Convert the decimal numbers in d into Gray-coded binary vectors of length
    n, returned flattened as one 1-D long tensor (n bits per input value).
    """
    # binary expansion, least significant bit first
    c = torch.zeros(len(d),int(n))
    for i in range(int(n)):
        c[:,i] = torch.fmod(d,2) # remainder: current bit
        d = torch.div(d,2,rounding_mode='floor') # quotient for the next bit
    # binary -> Gray, MSB first.
    # NOTE(review): aux starts as a VIEW of b[:,0] and is rebound to a fresh
    # tensor each iteration — the statement order here is load-bearing.
    c = torch.fliplr(c); b = torch.zeros_like(c); b[:,0] = c[:,0]; aux = b[:,0]
    for i in range(1,int(n)):
        b[:,i] = torch.logical_xor(aux, c[:,i])
        aux = torch.logical_xor(b[:,i], aux)  # aux becomes c[:,i] (binary prefix)
    return torch.reshape(b,[-1]).long()
def t_simbolobit(An, alfabeto):
    """
    Bn = simbolobit(An, alfabeto)

    Map a sequence of symbols An (taken from `alfabeto`) back to the bit
    sequence, assuming Gray coding was used when the symbols were generated.
    """
    bits_per_symbol = torch.log2(torch.tensor(len(alfabeto)))
    if bits_per_symbol > 1:
        # recover each symbol's index in the uniformly spaced alphabet
        step = abs(alfabeto[0] - alfabeto[1])
        indices = torch.round((An - alfabeto[0]) / step)
        # Bn = torch.reshape(t_de2gray(indices, bits_per_symbol), int(bits_per_symbol*len(An)))
        return t_de2gray(indices, bits_per_symbol)
    # binary case: map {-max, +max} directly onto {0, 1}
    return ((An / max(alfabeto)) + 1) / 2
def t_detectaSBF(r, alfabeto):
    """
    An = detectaSBF(r, alfabeto)

    Minimum-Euclidean-distance (nearest-level) detector: every sample of the
    detector input r is mapped to the closest amplitude level in `alfabeto`.
    """
    # |r_i - a_j| for every (sample, level) pair via broadcasting
    distances = torch.abs(r.reshape(-1, 1) - alfabeto)
    nearest = torch.argmin(distances, 1)
    return alfabeto[nearest]
def t_transmisorpamV2(Bn,Eb,M,p,L):
    """
    [Xn,Bn,An,phi,alfabetopam] = transmisorpamV2(Bn,Eb,M,p,L)

    Torch version of the M-PAM transmitter.
    Inputs:
        Bn = binary digit sequence (2-D tensor, bits along dim 1)
        Eb = mean transmitted energy per bit (Joules)
        M  = number of PAM symbols (rounded up to a power of two)
        p  = low-pass or band-pass pulse
        L  = samples used to represent one symbol
    Returns:
        Xn  = discrete transmitted signal
        Bn  = binary digits actually transmitted (zero-padded, long dtype)
        An  = transmitted amplitude levels
        phi = unit-energy basis pulse
        alfabetopam = amplitude level associated with every symbol
    """
    # Promote scalar parameters to tensors
    Eb = torch.tensor(Eb)
    M = torch.tensor(M)
    L = torch.tensor(L)
    # BUGFIX: the old reshape(p, p.size()) was a no-op; flatten to 1-D as the
    # adjacent comment (#p=p.squeeze()) and the NumPy twin intend.
    p = torch.reshape(p, (-1,))
    if Bn.size(dim=1)<1 or Eb<=0 or M<2 or torch.dot(p,p)==0 or L<1:
        raise Exception('Error: revise los parámetros de entrada')
    # Bits per symbol; round M up to a power of two
    k = torch.ceil(torch.log2(M))
    M = 2**(k)
    # Amplitude alphabet [eq. (4.21)]
    alfabetopam = torch.sqrt(3*Eb*torch.log2(M)/(M**2-1))*(2*(torch.arange(M))-M+1)
    # Zero-pad Bn so its length is a multiple of k
    Nb = torch.tensor(Bn.size(dim=1))
    Bn = Bn.squeeze().long()
    Bn = torch.hstack((Bn,torch.zeros((k*torch.ceil(Nb/k)-Nb).long())))
    Nb = torch.tensor(Bn.size(dim=0))
    Ns = torch.div(Nb,k,rounding_mode='trunc')  # symbols to transmit
    # Map (Gray-coded) bit groups onto amplitude levels
    if M>2:
        An = alfabetopam[t_gray2de(torch.reshape(Bn,(int(Ns),int(k))))]
    else:
        An = alfabetopam[Bn.long()]
    # Pad or truncate the supplied pulse so it spans exactly L samples
    Ls = p.size(dim=0)
    if Ls<L:
        # BUGFIX: torch.hstack takes a sequence of tensors; the old call
        # torch.hstack(p, torch.zeros(L-Ls)) raised a TypeError.
        p = torch.hstack((p, torch.zeros(int(L - Ls))))
    elif Ls>L:
        print('La duración del pulso se trunca a {} muestras'.format(str(L)))
        p = p[:L]  # must change if a pulse longer than L samples is wanted
    # Normalise the pulse energy to obtain the system basis
    phi = p / torch.sqrt(p@p)
    # Pulse train: one scaled copy of phi per transmitted symbol
    a = torch.reshape(An,(int(Ns),1))*phi
    Xn = torch.reshape(a,(int(Ns)*int(L),))
    return [Xn,Bn.long(),An,phi,alfabetopam]
def t_de2Nary(d, n, N):
    """
    b = de2Nary(d, n, N)
    Convert the decimal numbers in d into length-n digit vectors in base N
    (most significant digit first, one column per input value).
    """
    digits = torch.zeros(len(d), int(n))
    for col in range(int(n)):
        digits[:, col] = torch.fmod(d, N)           # remainder: current digit
        d = torch.div(d, N, rounding_mode='floor')  # quotient for the next digit
    # columns were produced LSB-first; flip so the MSB comes first
    return torch.fliplr(digits).long().T
'''
NEURAL NETWORK
'''
def correction(Nb_train, Nb_test, k, M, n):
    """
    [Nb_train,Ns_train,Nb_test,Ns_test] = correction(Nb_train,Nb_test,k,M,n)
    Adjust Nb_train so it is a multiple of k*M**n and Nb_test so it is a
    multiple of k, returning the corrected bit counts and symbol counts.
    """
    combo_bits = k * M ** n
    Ns_train = int(Nb_train // combo_bits)  # symbols, multiple of k and M**n
    Nb_train = int(Ns_train * combo_bits)   # bits actually transmitted
    Ns_train = int(Nb_train // k)           # symbols, multiple of k
    Ns_test = int(Nb_test // k)             # symbols, multiple of k
    Nb_test = int(Ns_test * k)              # bits actually transmitted
    return [Nb_train, Ns_train, Nb_test, Ns_test]
def create_datasets(H,Nb_train,Nb_test,Eb,M,n,SNRdb,batch_size,valid_size):
    """
    [trainloader,validloader,testloader,x,alphabet] = create_datasets(H,Nb_train,Nb_test,Eb,M,n,SNRdb,batch_size,valid_size)
    Build the train/validation/test DataLoaders, and also return the matrix
    of possible symbol combinations and the constellation alphabet.
    """
    # Data generation for training
    [Rn,Cn,Bn,x,alphabet] = generate_data(H,Nb_train,Eb,M,n,SNRdb,train=True)
    trainset = SymbolsDataset(Rn,Cn,Bn)
    # Data generation for testing
    [Rn,Cn,Bn,x,alphabet] = generate_data(H,Nb_test,Eb,M,n,SNRdb,train=False)
    testset = SymbolsDataset(Rn,Cn,Bn)
    # Number of training samples held out for validation
    # (valid_size is presumably a fraction in [0,1] — confirm with callers)
    num_train = len(trainset)
    split = int(np.floor(valid_size * num_train))
    # Split data
    train_split, valid_split = SymbolsDataset.split_data(trainset,split)
    # Load training data in batches
    trainloader = torch.utils.data.DataLoader(train_split,
                                              batch_size=batch_size,
                                              num_workers=0)
    # Load validation data in batches
    validloader = torch.utils.data.DataLoader(valid_split,
                                              batch_size=batch_size,
                                              num_workers=0)
    # Load test data in batches
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=batch_size,
                                             num_workers=0)
    return [trainloader,validloader,testloader,x,alphabet]
def generate_data(H,Nb,Eb,M,n,SNRdb,train):
    """
    [Rn,Cn,Bn,x,alphabet] = generate_data(H,Nb,Eb,M,n,SNRdb,train)
    Inputs:
        H     = channel matrix
        Nb    = number of bits to transmit (per antenna)
        Eb    = mean transmitted energy per bit (Joules)
        M     = number of PAM symbols
        n     = number of antennas of the nxn MIMO system
        SNRdb = signal-to-noise ratio in dB
        train = True for balanced training data, False for random test data
    Returns:
        Rn       = received (discrete) signal
        Cn       = combination index corresponding to each transmitted vector
        Bn       = binary digits actually transmitted
        x        = matrix with the possible symbol combinations
        alphabet = amplitude level associated with every symbol
    """
    k = int(torch.log2(torch.tensor(M))) #Number of bits on each symbol
    Ns = int(torch.floor(torch.tensor(Nb/k))) #Number of symbols (multiple of k)
    # PAM amplitude alphabet with mean energy Eb per bit
    alphabet = torch.sqrt(3*Eb*torch.log2(torch.tensor(M))/(M**2-1))*(2*(torch.arange(M))-M+1)
    # possible combinations (one column per combination of n symbols)
    ind = t_de2Nary(torch.arange(M**n),n,M)
    x = alphabet[ind]
    if train: # data for training. same number of symbols transmitted
        Xn = x.tile(int(Ns/x.size(1)))
        Xn = Xn[:,torch.randperm(Xn.size(1))] # Shuffle
        Bn = t_simbolobit(Xn.flatten(),alphabet) #Detected bits
        #Data reshape. Each row represents an antenna
        Bn = Bn.reshape(n,Nb)
        #Index of the combinations
        Cn = getidx(x,Xn,n)
    else: # data for testing
        # fixed seed for same results always
        torch.manual_seed(1)
        bn = torch.randint(0,2,(1,Nb*n)) #Bits to transmit
        [Xn,Bn,An,phi,alphabet] = t_transmisorpamV2(bn,Eb,M,torch.ones(1),1)
        #Data reshape. Each row represents an antenna
        Xn = Xn.reshape(n,Ns)
        Bn = Bn.reshape(n,Nb)
        An = An.reshape(n,Ns)
        #Index of the combinations
        Cn = getidx(x,An,n)
    SNR = 10**(SNRdb/10) #Signal to Noise Ratio [n.u.]
    varzn = Eb/(2*SNR) #Noise variance
    #if varzn <= 0.0025: varzn = torch.tensor(0.0025)
    Wn = torch.sqrt(varzn)*torch.randn(*Xn.shape) #AWGN
    Rn = H@Xn + Wn
    return [Rn,Cn,Bn.long(),x,alphabet]
def getidx(x, An, n):
    """
    idx = getidx(x, An, n)
    Return the combination number corresponding to each column of An.
        x  = matrix with the possible combinations (one per column)
        An = sequence of transmitted amplitude-level vectors (one per column)
        n  = number of antennas (rows of x and An)
    """
    matches = torch.empty(0)
    for column in An.T:
        # index of the column of x that equals this transmitted vector
        hit = (x == column.view(n, 1)).all(dim=0).nonzero(as_tuple=True)[0]
        matches = torch.hstack((matches, hit))
    return matches.long()
def train_model(model, trainloader, validloader, optimizer, criterion, patience, n_epochs):
    """
    [model, avg_train_losses, avg_valid_losses] = train_model(model, trainloader, validloader, optimizer, criterion, patience, n_epochs)
    Train the network with checkpointing on best validation loss and early
    stopping; return the model and the per-epoch loss histories.
    """
    min_valid_loss = np.inf
    es_counter = 0  # early-stopping counter: epochs without improvement
    # loss per batch
    train_losses = []
    valid_losses = []
    # loss per epoch
    avg_train_losses = []
    avg_valid_losses = []
    for epoch in range(1, n_epochs + 1):
        # Train the model
        model.train()
        for symbols, combs, bits in trainloader:
            # clear the gradients
            optimizer.zero_grad()
            # forward pass
            output = model(symbols)
            # calculate the loss
            loss = criterion(output, combs)
            # backward pass
            loss.backward()
            # perform a single optimization step
            optimizer.step()
            # record training loss
            train_losses.append(loss.item())
        # Validate the model
        # NOTE(review): no torch.no_grad() here, so validation builds graphs
        model.eval()
        for symbols, combs, bits in validloader:
            # forward pass
            output = model(symbols)
            # calculate the loss
            loss = criterion(output, combs)
            # record valid loss
            valid_losses.append(loss.item())
        # calculate average loss over an epoch
        train_loss = np.average(train_losses)
        valid_loss = np.average(valid_losses)
        avg_train_losses.append(train_loss)
        avg_valid_losses.append(valid_loss)
        if min_valid_loss > valid_loss:
            min_valid_loss = valid_loss
            # Saving State Dict
            torch.save(model.state_dict(), 'saved_model.pth')
        else:
            es_counter += 1
            # NOTE(review): es_counter is never reset on improvement, so
            # patience counts non-improving epochs in total — confirm intended
            if es_counter == patience: break
        # load saved model (reverts to the best weights seen so far)
        model.load_state_dict(torch.load('saved_model.pth'))
        # clear lists to track next epoch
        train_losses = []
        valid_losses = []
    return [model, avg_train_losses, avg_valid_losses]
def printloss(avg_train_losses,avg_valid_losses,SNR,save):
    """
    Plot the per-epoch training and validation losses and mark the
    early-stopping checkpoint; optionally save the figure to loss_plot.png.
    """
    fig = plt.figure(1,figsize=(10,8))
    plt.plot(range(1,len(avg_train_losses)+1),avg_train_losses, label='Training Loss')
    plt.plot(range(1,len(avg_valid_losses)+1),avg_valid_losses,label='Validation Loss')
    # find position of lowest validation loss
    minposs = avg_valid_losses.index(min(avg_valid_losses))+1
    plt.axvline(minposs, linestyle='--', color='r',label='Early Stopping Checkpoint')
    plt.xlabel('epochs')
    plt.ylabel('loss')
    plt.ylim(0, 2) # VARIABLE scale
    plt.xlim(0, len(avg_valid_losses)+1) # consistent scale
    plt.grid(True)
    plt.legend()
    plt.title('SNR = {}dB'.format(SNR))
    plt.tight_layout()
    plt.show()
    if save: fig.savefig('loss_plot.png', bbox_inches='tight')
    pass
def eval_model(model, trainloader, validloader, testloader, x, alphabet, k):
    """
    [berTrain, berValid, berTest] = eval_model(model, trainloader, validloader, testloader, x, alphabet, k)
    Evaluate the model and return the bit error rate on the training,
    validation and test sets (averaged over batches).
    """
    model.eval()

    def _avg_ber(loader):
        # accumulate the per-batch BER and average over the loader
        total = 0
        for symbols, combs, bits in loader:
            total += ber(model(symbols), x, alphabet, bits, k)
        return total / len(loader)

    return [_avg_ber(trainloader), _avg_ber(validloader), _avg_ber(testloader)]
def ber(output,x,alphabet,bits,k):
    """
    ber = ber(output,x,alphabet,bits,k)
    Compute the per-antenna bit error rate of a transmitted signal.
    Inputs:
        output   = neural network output (batch_size x n_combinations)
        x        = matrix with the possible symbol combinations
        alphabet = amplitude level associated with every symbol
        bits     = the set of transmitted Bn
        k        = bits per symbol
    Returns an (n, 1) tensor with one error rate per antenna.
    """
    Nb = k*output.size(0) # number of transmitted bits (k*Ns)
    n = x.size(0) # number of antennas
    val, idx = torch.max(output,1) # most likely combination per sample
    # Reshape 'bits' into (n, Nb): one row per antenna.
    # NOTE(review): assumes bits is batched as (batch, n, bits) — confirm
    # against SymbolsDataset's layout.
    Bn = torch.empty(0,Nb)
    for i in range(int(n)): Bn = torch.vstack((Bn,bits[:,i,:].flatten()))
    Andetected = x[:,idx] # Detected symbols
    Bndetected = t_simbolobit(Andetected.flatten(),alphabet).reshape(n,Nb) #Detected bits
    errorsBit = Nb-torch.sum(Bndetected==Bn,axis=1)
    return (errorsBit/Nb).reshape(n,1)
def detect(Rn,Bn,H,alphabet,Nb,Ns,n,method,**kwargs):
    """
    ber = detect(Rn,Bn,H,alphabet,Nb,Ns,n,method,**kwargs)
    Compute the BER of the classic detectors: 'ZF' (needs kwarg Hinv),
    'LMMSE' (needs kwargs Es and varzn) and 'ML' (needs kwarg x with all
    possible combinations).
    """
    if method == 'ZF':
        # zero forcing: apply the (pseudo)inverse of the channel
        Hinv = kwargs.get('Hinv',None)
        Z = Hinv@Rn
    elif method == 'LMMSE':
        # linear MMSE equalizer
        Es = kwargs.get('Es',None)
        varzn = kwargs.get('varzn',None)
        Z = torch.linalg.inv(H.T@H + varzn/Es*torch.eye(n))@H.T@Rn
    elif method == 'ML':
        # maximum likelihood: pick the combination minimising ||Rn - H@x||
        x = kwargs.get('x',None)
        hx = torch.unsqueeze(H@x,2) #H@x with shape (n,combinations,1)
        Z = x[:,torch.argmin(torch.linalg.norm(Rn.reshape(n,1,Ns)-hx,dim=0),axis=0)]
    Andetected = t_detectaSBF(Z.flatten(),alphabet).reshape(n,Ns) #Detected symbols
    Bndetected = t_simbolobit(Andetected.flatten(),alphabet).reshape(n,Nb) #Detected bits
    errorsBit = Nb-torch.sum(Bndetected==Bn,axis=1) # Nº Errors per bit
return (errorsBit/Nb).reshape(n,1) | osgofre/MIMO-DL | functions.py | functions.py | py | 20,722 | python | es | code | 1 | github-code | 1 | [
{
"api_name": "numpy.reshape",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 40... |
38177168817 | import argparse
from dis import dis
import gym
import numpy as np
from itertools import count
import pyximport; pyximport.install()
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import MultivariateNormal
import matplotlib.pyplot as plt
import pybullet
import os
from tensorboardX import SummaryWriter
from config import env_name, log_interval, device, lr, gamma, env_seed, torch_seed, rand_init
from arguments import get_args
def save_state(net, opt, torch_seed, env_seed, fname, np_seed=None, py_seed=None,):
    """Persist the model and optimizer state (plus the seeds and the
    config-level gamma/lr hyperparameters) to fname via torch.save.
    np_seed/py_seed are accepted but currently not stored."""
    checkpoint = {
        'state_dict': net.state_dict(),
        'optimizer': opt.state_dict(),
        'torch_seed': torch_seed,
        'env_seed': env_seed,
        'gamma': gamma,  # from config
        'lr': lr,        # from config
    }
    torch.save(checkpoint, fname)
class GaussNet(nn.Module):
    """Two-hidden-layer MLP mapping an 8-d observation to the 2-d mean of a
    Gaussian policy (the covariance is fixed by the caller)."""

    def __init__(self):
        super(GaussNet, self).__init__()
        self.affine1 = nn.Linear(8, 32)
        self.linear = nn.Linear(32, 32)
        self.affine2 = nn.Linear(32, 2)

    def forward(self, x):
        """Return the action mean for an observation batch x of shape (..., 8)."""
        x = F.relu(self.affine1(x))
        x = F.relu(self.linear(x))
        mu = self.affine2(x)
        # BUGFIX: removed the dead trailing `x = F.relu(x)` — its result was
        # never used and the returned mean must stay unbounded.
        return mu
def get_action(state, net, device):
    """Sample an action from the Gaussian policy.

    state is a NumPy observation; net maps it to the 2-d mean of a
    multivariate normal with fixed covariance 0.1*I. Returns the sampled
    action tensor and its log-probability."""
    obs = torch.from_numpy(state).float().to(device)
    mean = net(obs).to(device)
    # fixed isotropic covariance; only the mean is learned
    policy = MultivariateNormal(mean, 0.1 * torch.eye(2).to(device))
    sampled = policy.sample()
    return sampled, policy.log_prob(sampled)
def update_policy(policy_loss, optimizer, device):
    """Run one optimization step on the summed REINFORCE loss.

    policy_loss is a list of 1-D loss tensors (one per episode); they are
    concatenated, summed, backpropagated and applied via optimizer."""
    optimizer.zero_grad()
    total_loss = torch.cat(policy_loss).sum().to(device)
    total_loss.backward()
    optimizer.step()
def main():
    """Entry point: REINFORCE-with-baseline training loop for a continuous
    2-action gym environment, with optional checkpoint resuming, TensorBoard
    logging, checkpoint saving and a reward-vs-iteration plot."""
    args = get_args()
    gym.logger.set_level(40)
    env = gym.make(env_name,rand_init=rand_init)
    env.seed(env_seed)
    torch.manual_seed(torch_seed)
    state= env.reset()
    num_inputs = state.shape[0]
    num_actions = env.action_space.shape[0]
    print('state size:', num_inputs)
    print('action space shape, highs, lows : ', env.action_space.shape," ",env.action_space.high," ",env.action_space.low)
    net = GaussNet().to(device)
    optimizer = optim.Adam(net.parameters(), lr=lr)
    writer = SummaryWriter('logs')
    # Optionally resume from a saved checkpoint (load_train = [episode, iteration])
    if type(args.load_train) == list and (args.load_train[0] >-1 and args.load_train[1] >-1) :
        load_path='{}_ep_{}_it_{}_type_{}.pkl'.format(args.rl_alg,args.load_train[0],args.load_train[1],args.alg_type)
        if not os.path.exists(os.path.join(args.model_path,load_path)):
            raise Exception("load path {} doesn't exist".format(load_path))
        os.system('clear')
        print(" Loading ... {}".format(load_path))
        net.load_state_dict(torch.load(os.path.join(args.model_path,load_path))["state_dict"])
        optimizer.load_state_dict(torch.load(os.path.join(args.model_path,load_path))["optimizer"])
        start_iteration = args.load_train[1]
        start_episode = 0
    else:
        start_iteration = 0
        start_episode = 0
    net.train()
    steps=0
    avg_itr_reward = []
    num_iter = args.num_iterations
    num_episodes = args.num_episodes
    for itr in range(start_iteration,start_iteration + num_iter):
        total_ep_reward = 0
        log_probs_itr = []
        policy_loss_itr = []
        returns_itr = []
        for i_episode in range(num_episodes):
            # Roll out one full episode with the current policy
            state, ep_reward = env.reset(), 0
            done = False
            rewards = []
            log_probs = []
            returns=[]
            while not done:
                steps=steps+1;
                action, log_prob = get_action(state, net, device)
                state, reward, done, _ = env.step(action.cpu().numpy())
                # print(reward)
                ep_reward +=reward
                rewards.append(reward)
                log_probs.append(log_prob)
            #Compute the discounted returns from the rewards (backwards)
            R = 0
            for r in rewards[::-1]:
                R = r + gamma * R
                returns.insert(0, R)
            returns = torch.FloatTensor(returns).to(device)
            #Store the log probability and returns to calculate the loss function
            log_probs_itr.append(log_probs)
            returns_itr.append(returns)
            total_ep_reward += ep_reward
        returns_ep = torch.cat(returns_itr)
        # Mean and std of the returns over the episodes (whitening baseline)
        mean = returns_ep.mean()
        std = returns_ep.std()
        for log_probs, returns in zip(log_probs_itr,returns_itr):
            # Use VANILLA REINFORCE with baseline
            returns = (returns - mean) / std
            policy_loss_ep = []
            for log_prob, return_ in zip(log_probs,returns):
                policy_loss_ep.append(-log_prob * return_)
            policy_loss_ep = torch.stack(policy_loss_ep).to(device)
            policy_loss_itr.append(policy_loss_ep)
        avg_itr_reward.append(total_ep_reward / num_episodes)
        # Update the policy neural network parameter using the Loss function
        update_policy(policy_loss_itr,optimizer,device)
        if itr % log_interval == 0:
            # NOTE(review): the logged loss is only the LAST episode's loss
            print('iteration: {} | loss: {:.3f} | steps: {} | avg_reward: {:.2f}'.format(
                itr, policy_loss_ep.sum(), steps, avg_itr_reward[-1]))
            writer.add_scalar('log/avg_reward', float(avg_itr_reward[-1]), itr)
            writer.add_scalar('log/loss', float(policy_loss_ep.sum()), itr)
    # Name the checkpoint after the cumulative iteration count
    if type(args.load_train) == list and (args.load_train[0] >-1 and args.load_train[1] >-1) :
        model_path='{}_ep_{}_it_{}_type_{}.pkl'.format(args.rl_alg,args.num_episodes,args.load_train[1]+args.num_iterations,args.alg_type)
    else:
        model_path='{}_ep_{}_it_{}_type_{}.pkl'.format(args.rl_alg,args.num_episodes,args.num_iterations,args.alg_type)
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)
    save_state(net, optimizer, torch_seed, env_seed, os.path.join(args.model_path,model_path))
    # Plot the average cumulative reward per iteration
    plt.figure()
    plt.plot(range(itr+1),avg_itr_reward)
    plt.title("Average Cumulative Reward vs iteration")
    plt.xlabel("iteration")
    plt.ylabel("Average Reward")
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    plt.savefig("{}/{}_plot.png".format(args.save_path,model_path[:-4]))
if __name__ == '__main__':
main() | sashankmodali/CS593-Robotics | Assignment 4/Continuous/modified-gym-env/train_model.py | train_model.py | py | 6,564 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyximport.install",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "config.torch_seed",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "config.env_seed",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "config.gamma",
... |
27651236552 | '''
This code is based on https://github.com/ekwebb/fNRI which in turn is based on https://github.com/ethanfetaya/NRI
(MIT licence)
'''
from synthetic_sim_comperrors import *
import time
import numpy as np
import argparse
import os
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# Command-line configuration for the simulation runs
parser = argparse.ArgumentParser()
parser.add_argument('--num-train', type=int, default=100,
                    help='Number of training simulations to generate.')
parser.add_argument('--num-valid', type=int, default=10000,
                    help='Number of validation simulations to generate.')
parser.add_argument('--num-test', type=int, default=10000,
                    help='Number of test simulations to generate.')
parser.add_argument('--length', type=int, default=10000,
                    help='Length of trajectory.')
parser.add_argument('--length-test', type=int, default=10000,
                    help='Length of test set trajectory.')
parser.add_argument('--sample-freq', type=int, default=100,
                    help='How often to sample the trajectory.')
parser.add_argument('--n-balls', type=int, default=5,
                    help='Number of balls in the simulation.')
parser.add_argument('--seed', type=int, default=42,
                    help='Random seed.')
parser.add_argument('--savefolder', type=str, default='springcharge_comperrors',
                    help='name of folder to save everything in')
parser.add_argument('--sim-type', type=str, default='springcharge',
                    help='Type of simulation system')
# NOTE(review): type=str with bool defaults — any non-empty string passed on
# the command line is truthy, so '--timesteptest False' still enables the test
parser.add_argument('--timesteptest', type=str, default=True,
                    help='Generate many datasets with different timesteps')
parser.add_argument('--animation', type=str, default=False,
                    help='Generate animation of many datasets with different timesteps')
args = parser.parse_args()

# Record the run configuration next to the outputs (fails if folder exists)
os.makedirs(args.savefolder)
par_file = open(os.path.join(args.savefolder,'sim_args.txt'),'w')
print(args, file=par_file)
par_file.flush()
par_file.close()

# no noise-useless for our investigation
if args.sim_type == 'springcharge':
    sim = SpringChargeSim(noise_var=0.0, n_balls=args.n_balls, box_size=5.0)
elif args.sim_type == 'springchargequad':
    sim = SpringChargeQuadSim(noise_var=0.0, n_balls=args.n_balls, box_size=5.0)
elif args.sim_type == 'springquad':
    sim = SpringQuadSim(noise_var=0.0, n_balls=args.n_balls, box_size=5.0)
elif args.sim_type == 'springchargefspring':
    sim = SpringChargeFspringSim(noise_var=0.0, n_balls=args.n_balls, box_size=5.0)
np.random.seed(args.seed)
def generate_mse_pertime(num_sims, length, sample_freq):
    """Accumulate per-timestep MSE curves over num_sims independent
    trajectories sampled from the module-level `sim` and return the averaged
    location MSE, velocity MSE and the array of integration time-steps."""
    loc_all = []
    vel_all = []
    _delta_T_ = []
    for i in range(num_sims):
        t = time.time()
        loc_mse, vel_mse, delta_T = sim.sample_trajectory(T=length, sample_freq=sample_freq)
        # dim 0 is the batch dimension, dim 1 is time: already averaged MSE over particles
        loc_all.append(loc_mse)
        vel_all.append(vel_mse)
        _delta_T_ = delta_T
    # NOTE(review): mean(axis=0) already averages over simulations; dividing
    # by num_sims again looks like a double normalisation — confirm against
    # sample_trajectory's contract.
    mse_loc = (np.asarray(loc_all).mean(axis=0)) / num_sims
    mse_vel = (np.asarray(vel_all).mean(axis=0)) / num_sims
    # NOTE(review): returns the leaked loop variable delta_T, not _delta_T_
    # (same value here, but the accumulator is effectively unused).
    return mse_loc, mse_vel, delta_T
if (args.timesteptest):
    print("Calculating MSE over time due to computational errors")
    mse_loc, mse_vel, delta_T = generate_mse_pertime(args.num_train, args.length, args.sample_freq)
    # dim 0 on mse is different delta_T, dim 1 is different times along the motion, then (x,y)
    # [0,:,:] is the delta_T that the model gets
    mse_model = mse_loc[0].mean(axis=1)
    mse_model_vel = mse_vel[0].mean(axis=1)
    np.save(os.path.join(args.savefolder, 'mse_model_pos.npy'), mse_model)
    np.save(os.path.join(args.savefolder, 'mse_model_vel.npy'), mse_model_vel)
    # 2-D summary: MSE averaged over time and coordinates, per time-step
    mse_loc_mean = (np.asarray(mse_loc).mean(axis=1).mean(axis=1))
    fig = plt.figure()
    plt.plot(delta_T, mse_loc_mean)
    plt.xlabel('Time-step of Integration/(arbitrary units)')
    plt.ylabel('Averaged Mean Square Error/(arbitrary units)')
    plt.show()
    """
    https://jakevdp.github.io/PythonDataScienceHandbook/04.12-three-dimensional-plotting.html
    """
    # 3-D surface of MSE vs integration time-step and time along the
    # trajectory, rescaled to plotting units
    y = np.multiply(delta_T, 10.0)
    x = np.multiply(np.arange(0,(int(args.length / args.sample_freq)-1)*0.001, 0.001), 10.0)
    Z = np.multiply((np.asarray(mse_loc).mean(axis=2)), 100.0)
    X, Y = np.meshgrid(x,y)
    fig = plt.figure()
    ax = plt.axes(projection = '3d')
    ax.plot_surface(X,Y,Z, cmap = 'plasma', edgecolor= 'none')
    ax.set_ylabel('Time-step of Integration')
    ax.set_xlabel('Time along trajectory')
    ax.set_zlabel('Averaged Mean Square Error')
    # save the surface from several viewpoints
    ax.view_init(45, -35)
    fig.savefig(os.path.join(args.savefolder, 'timestepmse.eps'), format='eps')
    # fig.savefig(os.path.join(args.savefolder, 'timestepmse.png'))
    ax.view_init(75,-35)
    fig.savefig(os.path.join(args.savefolder, 'timestepmse_birdseye.eps'), format='eps')
    # fig.savefig(os.path.join(args.savefolder, 'timestepmse_birdseye.png'))
    ax.view_init(45,225)
    fig.savefig(os.path.join(args.savefolder, 'timestepmse_rotated.eps'), format='eps')
    # fig.savefig(os.path.join(args.savefolder, 'timestepmse_rotated.png'))
    plt.show()
    # for i in range(len(mse_loc[0])):
    #     fig = plt.figure()
    #     mse_loc_mean = (np.asarray(mse_loc).mean(axis=2))
    #     timeslot = i*0.001
    #     plt.plot(delta_T, mse_loc_mean[:,i])
    #     plt.show()
if (args.animation):
if args.sim_type == 'springcharge':
sim.sample_trajectory_animation(T=args.length, sample_freq=args.sample_freq)
else:
print('Animation only implemented for springcharge Model') | vassilis-karavias/fNRIsigma-master | data/generate_dataset_comperrors.py | generate_dataset_comperrors.py | py | 5,643 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_... |
"""Interactive CLI for downloading course activities from an ALMS remote
education site.  Prompts for the site domain (persisted in settings.json) and
session cookies, lists enrolled courses, and downloads a selected range of
activities while tracking previously downloaded IDs in history.txt.
"""
import json
import os
import time

from models import Downloader, User

SETTINGS_FILE = "settings.json"

# Create history file if it does not exist (one downloaded-activity ID per line)
if not os.path.exists("history.txt"):
    with open("history.txt", "w") as f:
        f.write("")

# Load persisted settings, or create the file with defaults on first run
if os.path.exists(SETTINGS_FILE):
    with open(SETTINGS_FILE, "r") as f:
        data = json.load(f)
else:
    data = {"domain": None}
    with open(SETTINGS_FILE, "w") as f:
        json.dump(data, f)

# Ask for the site domain; an empty answer reuses the stored one (if any)
if data["domain"] is None:
    print("Uzaktan eğitim websitesi adresini giriniz: (https:// olmadan)")
else:
    print(f"Uzaktan eğitim websitesi adresini giriniz: ({data['domain']} için enter)")
domain = input()
while domain == "" and data["domain"] is None:
    print("Lütfen geçerli bir adres giriniz.")
    domain = input()
if data["domain"] is None and domain != "":
    # First run: remember the domain for next time
    data["domain"] = domain
    with open(SETTINGS_FILE, "w") as f:
        json.dump(data, f)
elif data["domain"] is not None and domain == "":
    # Empty input: fall back to the stored domain
    domain = data["domain"]

print("Çerezleri giriniz:")
cookies = input()
while cookies == "":
    print("Çerezler boş bırakılamaz.")
    cookies = input()

print("Ders listesi alınıyor.")
user = User(domain, cookies)
courses = user.get_enrolled_courses()
downloader = Downloader()
selected_activities = []
choose_again = True
# BUGFIX: read history through a context manager; the previous bare open()
# inside the generator expression leaked the file handle.
with open("history.txt", encoding="utf-8") as f:
    previous_downloads = set(line.strip() for line in f)

while len(courses) > 0 and choose_again:
    for i, course in enumerate(courses, start=1):
        print(str(i) + ". " + course.name + " (" + str(course.activity_count) + ")")
    print("Ders seçiniz: ")
    course_selection = int(input())
    if 0 < course_selection <= len(courses):
        course = courses[course_selection - 1]
        print(course.name + " içeriği yükleniyor.")
        course.activities = []
        course.fetch_activities()
        for i, activity in enumerate(course.activities, start=1):
            try:
                if activity.id in previous_downloads:
                    print(
                        "%d) Hafta: %d Dosya Adı: %s (Daha önce indirilmiş.)"
                        % (i, activity.weeks[0], activity.slug_name)
                    )
                else:
                    print(
                        "%d) Hafta: %d Dosya Adı: %s"
                        % (i, activity.weeks[0], activity.slug_name)
                    )
            except Exception:
                # Best-effort listing: skip activities with malformed metadata.
                # BUGFIX: numbering now always tracks the list position (the old
                # code skipped the counter increment on failure, so the menu
                # numbers no longer matched the indices used for selection).
                continue
        print("İçerik aralığı seçiniz (Örn: 1-17): ")
        activity_selection = input().split("-")
        while len(activity_selection) != 2:
            print("Lütfen geçerli bir aralık giriniz.")
            activity_selection = input().split("-")
        # the while-loop above guarantees exactly two fields here
        start_activity = int(activity_selection[0])
        end_activity = int(activity_selection[1])
        if (
            end_activity - start_activity >= 0
            and start_activity > 0
            and 0 < end_activity <= len(course.activities)
        ):
            for i in range(start_activity - 1, end_activity):
                activity = course.activities[i]
                activity.prepare_video(downloader)
                selected_activities.append(activity)
    print("Başka bir dersten içerik seçmek ister misiniz? (e/h)")
    choose_again = input().lower() == "e"

if selected_activities:
    start = time.time()
    downloader.start_downloads()
    end = time.time()
    # BUGFIX: the elapsed time was measured but never reported
    print("Toplam süre: %.1f saniye" % (end - start))
else:
    print(selected_activities)
    print("İçerik seçilmedi.")
{
"api_name": "os.path.exists",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number":... |
6835048152 | '''
This script contains functions which allow a user to select a ROI on a
single/multiple samples
These functions in a variety of ways call the roiselector function which is a GUI
to allow a user to select points. The functionionality of each funcitons is as follows:
ChangePoint: allows a user to CHANGE the locations of found points corresponding
to the matched images and their .feat files
SelectArea: allows a user to propogate a selected area through a stack of images
(works with any stack but probs best to use it after aligning tissue)
SelectPoint: allows a user to SELECT a single point. used to manually annotate
images during the featfinding period
'''
from time import sleep
import matplotlib
matplotlib.use('TkAgg')
import cv2
import numpy as np
# import tensorflow as tf
from glob import glob
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector
import tifffile as tifi
from multiprocessing import Pool, cpu_count
from copy import deepcopy
from itertools import repeat
if __name__.find("HelperFunctions") == -1:
from Utilities import *
else:
from HelperFunctions.Utilities import *
# for each fitted pair, create an object storing their key information
class feature:
    """A single matched feature between a reference and a target image.

    Attributes:
        refP: position of the match on the reference image
        tarP: position of the match on the target image
        dist: euclidean error of the difference in gradient fields
        size: size of the feature
        res:  resolution index of the image that was processed
        ID:   identifier of the feature (not part of the repr)
    """

    def __init__(self, refP = None, tarP = None, dist = None, size = None, res = None, ID = None):
        self.refP = refP
        self.tarP = tarP
        self.dist = dist
        self.size = size
        self.res = res
        self.ID = ID

    def __repr__(self):
        # summarise the match as a plain tuple (ID deliberately omitted)
        summary = (self.dist, self.refP, self.tarP, self.size, self.res)
        return repr(summary)
def featChangePoint(dataSource, ref, tar, featureInfo = None, nopts = 5, ts = 4, title = "Matching"):
    """GUI to manually CHANGE matched feature positions between a reference
    and a target sample; when reading from disk, it REPLACES the original
    .reffeat/.tarfeat files in place.

    Inputs:   (dataSource), directory containing info/ and image dirs, or None
                  to work purely in memory on (ref, tar) images + featureInfo
              (ref, tar), sample names (disk mode) or images (in-memory mode)
              (featureInfo), list of feature objects OR [refDict, tarDict]
                  (only used when dataSource is None)
              (nopts), number of points to adjust, defaults to 5
              (ts), text size for plots (recomputed from image size in disk mode)
              (title), window title for the selector GUI
    Outputs:  (featInfos), updated features in the same format as the input;
                  in disk mode the updated .reffeat/.tarfeat are also saved
    """
    # if modifying in-memory data (no files touched)
    if dataSource is None:
        imgref = ref
        imgtar = tar
        matchRefO = {}
        matchTarO = {}
        # if there are no features found, do nothing
        if len(featureInfo) == 0:
            pass
        # if the info is a list of feature objects, index them by position
        elif type(featureInfo[0]) is not dict:
            for n, f in enumerate(featureInfo):
                matchRefO[n] = f.refP
                matchTarO[n] = f.tarP
        else:
            # if dictionaries are passed in as a list
            matchRefO = featureInfo[0]
            matchTarO = featureInfo[1]
    # if modifying files on disk
    else:
        # get the dirs of the info
        infodirs = dataSource + 'info/'
        # first try the main masked images...
        try:
            imgdirs = dataSource + 'maskedSamples/'
            # get the images (any extension)
            imgrefdir = glob(imgdirs + ref + ".*")[0]
            imgtardir = glob(imgdirs + tar + ".*")[0]
            imgref = cv2.imread(imgrefdir)
            imgtar = cv2.imread(imgtardir)
        # ...fall back to the segSections layout
        except:
            imgdirs = dataSource
            imgrefdir = glob(imgdirs + ref + ".*")[0]
            imgtardir = glob(imgdirs + tar + ".*")[0]
            imgref = cv2.imread(imgrefdir)
            imgtar = cv2.imread(imgtardir)
        matchRefdir = infodirs + ref + ".reffeat"
        matchTardir = infodirs + tar + ".tarfeat"
        matchRefO = txtToDict(matchRefdir, float)[0]
        matchTarO = txtToDict(matchTardir, float)[0]
        # automatically set the text size from the image height
        ts = imgref.shape[0]/1000
    _, commonFeat = uniqueKeys([matchRefO, matchTarO])
    # create the dictionary with ONLY the common features and their positions
    # for the number of features specified by nopts
    matchRef = {}
    matchTar = {}
    # select only up to the specified number of points to use
    if nopts < len(commonFeat):
        commonFeat = commonFeat[:nopts]
    # if there are fewer matched points than desired, add placeholder points
    # at the origin to move around later
    elif nopts > len(commonFeat):
        for i in range(nopts - len(commonFeat)):
            pt = "added_" + str(i)
            commonFeat.append(pt)
            matchRefO[pt] = np.array([0, 0])
            matchTarO[pt] = np.array([0, 0])
    for cf in commonFeat:
        matchRef[cf] = matchRefO[cf]
        matchTar[cf] = matchTarO[cf]
    # create a standard combined image (ref left, tar right)
    imgs = [imgref, imgtar]
    # get the image dimensions
    imgshapes = []
    for img in imgs:
        imgshapes.append(np.array(img.shape))
    # create a max size field of all images
    xm, ym, cm = np.max(np.array(imgshapes), axis = 0)
    field = np.zeros((xm, ym, cm)).astype(np.uint8)
    # stack the images next to each other to create a combined image
    imgsStand = []
    for img in imgs:
        xr, yr, c = img.shape
        img_s = field.copy(); img_s[:xr, :yr, :] = img
        imgsStand.append(img_s)
    imgCombine = np.hstack(imgsStand)
    featChange = {}
    cv2.startWindowThread()
    for feat in commonFeat:
        # get the pair of features to change
        featChange['ref'] = matchRef[feat]
        featChange['tar'] = matchTar[feat]
        for featC in featChange:
            featID = featChange[featC]
            # draw all current points onto a fresh copy of the combined image
            imgCombineA = annotateImg(imgCombine.copy(), [matchRef, matchTar], ts)
            # the target image sits at x-offset ym in the combined image
            if featC == 'tar':
                featID += np.array([ym, 0])
            # highlight the point which is being changed
            cv2.circle(imgCombineA, tuple(featID.astype(int)), int(ts*12), (0, 255, 0), int(ts*8))
            # get the x and y position from the user selection
            y, x = roiselector(imgCombineA, title)
            # if the window is closed to skip selecting a feature, keep the feature
            if (x * y == 0).all():
                yme = featID[0]
                xme = featID[1]
            else:
                # centre of the selected rectangle becomes the new position
                yme = np.mean(y)
                xme = np.mean(x)
            # store the updated position (target positions are shifted back
            # into target-image co-ordinates)
            if featC == 'ref':
                matchRef[feat] = np.array((yme, xme))
            elif featC == "tar":
                matchTar[feat] = np.array((yme, xme)) - np.array([ym, 0])
    # save the new manually added positions to the original location, REPLACING
    # the information; return the data in the same format as the input
    if dataSource is None:
        # if the input was a list of objects, return a list of objects
        featInfos = []
        if type(featureInfo[0]) is not dict:
            for f in matchRef:
                featInfos.append(feature(refP = matchRef[f], tarP = matchTar[f], \
                    dist = -1, size = 100, res = -1, ID = None))
        else:
            featInfos = [matchRef, matchTar]
    else:
        dictToTxt(matchRef, matchRefdir, shape = imgref.shape, fit = False)
        dictToTxt(matchTar, matchTardir, shape = imgtar.shape, fit = False)
        # if the input was a list of dictionaries, return a list of dictionaries
        featInfos = [matchRef, matchTar]
    return(featInfos)
def featSelectArea(datahome, size, feats = 1, sample = 0, normalise = False, prefix = "tif"):
    """GUI to manually select `feats` rectangular ROIs on one reference
    sample, then extract those ROIs from every aligned sample.

    Inputs:   (datahome), root data directory
              (size), resolution directory (appended to datahome)
              (feats), number of ROIs to select, defaults to 1
              (sample), index (int) or name (str) of the reference sample
              (normalise), if True colour-normalise every sample against a
                  downscaled copy of the reference image
              (prefix), file extension of the aligned samples
    Outputs:  (), saves the extracted sections and an all.tifshape info file
                  per ROI under segSections/segN/
    """
    cpuCount = int(cpu_count() * 0.75)
    segSections = datahome + str(size) + "/segSections/"
    # set True to process samples sequentially (useful for debugging)
    serialised = False
    for f in range(feats):
        dirMaker(segSections + "seg" + str(f) + "/")
    alignedSamples = datahome + str(size) + "/RealignedSamples/"
    # get all the samples to be processed
    samples = sorted(glob(alignedSamples + "*" + prefix))
    # get the image to be used as the reference
    if isinstance(sample, int):
        refpath = samples[sample]
    elif isinstance(sample, str):
        refpath = glob(alignedSamples + sample + "*.tif")[0]
    # load the image (tifffile first, OpenCV as fallback)
    try:
        img = cv2.cvtColor(tifi.imread(refpath), cv2.COLOR_BGR2RGB)
    except Exception:
        img = cv2.imread(refpath)
    # create a small scale version of the image for colour normalisation
    if normalise: imgref = cv2.resize(img, (int(img.shape[1] * 0.1), int(img.shape[0] * 0.1)))
    else: imgref = None
    # extract n ROIs from the reference sample, drawing each selection on the
    # image so the next selection can be placed relative to it
    x = {}
    y = {}
    for f in range(feats):
        x[f], y[f] = roiselector(img)
        cv2.rectangle(img, (int(x[f][0]), int(y[f][0])), (int(x[f][1]), int(y[f][1])), (255, 255, 255), 40)
        cv2.rectangle(img, (int(x[f][0]), int(y[f][0])), (int(x[f][1]), int(y[f][1])), (0, 0, 0), 20)
    # BUGFIX: collect the per-sample shape dicts in a list ordered like
    # `samples` in BOTH branches; the serialised branch previously keyed a
    # dict by sample name, which raised a KeyError when the loop below
    # indexed shapes[n] by position.
    if serialised:
        shapes = []
        for s in samples:
            shapes.append(sectionExtract(s, segSections, feats, x, y, imgref))
    else:
        with Pool(processes=cpuCount) as pool:
            shapes = pool.starmap(sectionExtract, zip(samples, repeat(segSections), repeat(feats), repeat(x), repeat(y), repeat(imgref)))
    # create a dictionary of all the tif shapes. they're all the same size,
    # its just about ensuring the input into align is consistent
    for i in range(feats):
        imgShapes = {}
        for n, s in enumerate(samples):
            name = nameFromPath(s, 3)
            imgShapes[name] = shapes[n][i]
        dictToTxt(imgShapes, segSections + "seg" + str(i) + "/info/all.tifshape")
def sectionExtract(path, segSections, feats, x, y, ref = None):
    """Cut `feats` rectangular sections out of the image at `path` and save
    each one under segSections/segN/<sample>.tif.

    Inputs:   (path), image file to process
              (segSections), root directory of the segN/ output folders
              (feats), number of sections to extract
              (x, y), dicts mapping feature index -> (start, end) co-ordinates
              (ref), optional reference image for colour normalisation
    Outputs:  (shapeOfSection), dict mapping feature index -> section shape
    """
    image = cv2.imread(path)
    sampleName = nameFromPath(path, 3)
    # optionally colour-normalise against the reference image
    if (type(ref) is list) or (type(ref) is np.ndarray):
        image = hist_match(image, ref)
    shapeOfSection = {}
    for n in range(feats):
        print(sampleName + " section " + str(n))
        destDir = segSections + "seg" + str(n) + "/"
        y0, y1 = int(y[n][0]), int(y[n][1])
        x0, x1 = int(x[n][0]), int(x[n][1])
        cut = image[y0:y1, x0:x1, :]
        cv2.imwrite(destDir + sampleName + ".tif", cut)
        shapeOfSection[n] = cut.shape
    return shapeOfSection
def roiselector(img, title = "Matching"):
    """Show `img` in the annotator GUI at a screen-friendly resolution and
    return the selected x and y ranges scaled back onto the original image.

    Inputs:   (img), numpy array image of interest
              (title), title shown on the selection window
    Outputs:  (x, y), arrays of the selected x and y extents in original
                  image co-ordinates
    """
    rows, cols, _ = img.shape
    aspect = rows / cols
    # pick a display size which fits the screen while keeping the aspect ratio
    if rows > cols / 2:
        # tall-ish image: fix the displayed height
        dispX = int(700)
        dispY = int(700 / aspect)
    else:
        # wide image: fix the displayed width
        dispY = int(1200)
        dispX = int(1200 * aspect)
    # single scale factor maps GUI picks back to full resolution (aspect kept)
    scale = cols / dispY
    # perform the selection on the reduced-size image
    small = cv2.resize(img, (dispY, dispX))
    small = cv2.cvtColor(small, cv2.COLOR_RGB2BGR)
    x, y = annotatorGUI(small, title + " ENSURE YOU WAIT ~1 SEC BEFORE PRESSING ENTER")
    # scale the positions back to their original size
    return (x * scale, y * scale)
def annotatorGUI(img, title):
    """Open a matplotlib window with a rectangle selector on `img`; the user
    drags a box and presses enter to confirm.

    Returns [x_array, y_array] of the last selection (in display pixels), or
    [0, 0] when no selection could be retrieved.  Communication with the
    matplotlib callbacks is done through the module-global `allPos`.
    """
    def line_select_callback(eclick, erelease):
        """
        Callback for line selection.

        *eclick* and *erelease* are the press and release events.
        """
        # get the click positions
        x1, y1 = eclick.xdata, eclick.ydata
        x2, y2 = erelease.xdata, erelease.ydata
        print(str([np.array([x1, x2]), np.array([y1, y2])]))
        # store the selection globally so the enclosing function can read it
        global allPos
        allPos = [np.array([x1, x2]), np.array([y1, y2])]
    def toggle_selector(event):
        # pressing enter confirms the selection and closes the window
        if event.key == 'enter':
            toggle_selector.RS.set_active(False)
            fig.canvas.mpl_disconnect(toggle_selector)
            plt.close()
    # toggle_selector.RS.set_active(True)
    fig, ax = plt.subplots(figsize = (16, 12))
    # default to the origin in case the user closes the window without selecting
    global allPos
    allPos = [np.array([0, 0]), np.array([0, 0])]
    plt.axis("off")
    plt.imshow(img)
    ax.set_title(title)
    # drawtype is 'box' or 'line' or 'none'
    # NOTE(review): `drawtype` was removed in newer matplotlib releases
    # (boxes are the default) — confirm against the pinned matplotlib version.
    toggle_selector.RS = RectangleSelector(ax, line_select_callback,
                                       drawtype='box', useblit=True,
                                       button=[1, 3],  # disable middle button
                                       minspanx=1, minspany=1,
                                       spancoords='pixels',
                                       interactive=True)
    # get the positions by creating a global variable to access the callback function
    fig.canvas.mpl_connect('key_press_event', toggle_selector)
    plt.show()
    try:
        return(allPos)
    # if no new position declared, return the origin
    except:
        return([0, 0])
def annotateImg(imgs, info, ts):
    """Draw every feature position in `info` onto the image(s), labelling
    each with its index, and return the (combined) annotated image.

    Inputs:   (imgs), a single image, or a list of images which are padded to
                  a common size and stacked horizontally
              (info), list of .feat dictionaries (or point sequences); the
                  m-th entry is drawn onto the m-th horizontal panel
              (ts), text/marker size
    Outputs:  (imgCombine), the combined image with all annotated features
    """
    if type(imgs) is list:
        # pad every image onto a shared max-sized canvas, then stack them
        allShapes = np.array([np.array(im.shape) for im in imgs])
        xm, ym, cm = np.max(allShapes, axis = 0)
        blank = np.zeros((xm, ym, cm)).astype(np.uint8)
        padded = []
        for im in imgs:
            xr, yr, c = im.shape
            canvas = blank.copy()
            canvas[:xr, :yr, :] = im
            padded.append(canvas)
        imgCombine = np.hstack(padded)
    else:
        # a single image is annotated in place
        imgCombine = imgs
    # per-panel dimensions within the combined image
    xm, ym, cm = (np.array(imgCombine.shape) / len(info)).astype(int)
    for m, match in enumerate(info):
        for pos, r in enumerate(match):
            if type(match) is dict:
                r = match[r]
            # shift the point into the m-th panel of the combined image
            if type(r) is np.ndarray:
                r = tuple((r.astype(int)) + np.array([int(ym * m), 0]))
            else:
                r = tuple(np.array(r).astype(int) + np.array([int(ym * m), 0]))
            # mark the point, then write its index (white outline, black core)
            cv2.circle(imgCombine, r, int(ts*10), (255, 0, 0), int(ts*5))
            labelAt = tuple(r + np.array([20, 0]))
            cv2.putText(imgCombine, str(pos), labelAt,
                        cv2.FONT_HERSHEY_SIMPLEX, ts, (255, 255, 255), int(ts*5))
            cv2.putText(imgCombine, str(pos), labelAt,
                        cv2.FONT_HERSHEY_SIMPLEX, ts, (0, 0, 0), int(ts*2.5))
    return(imgCombine)
if __name__ == "__main__":
    # Demo / scratch driver: point `dataSource` at a processed data directory
    # and list the reference/target sample names to re-annotate.
    dataSource = '/Volumes/USB/H671A_18.5/'
    size = 3

    # BUGFIX: the previous code called featChangePoint(r, f, ts=4) with `r`
    # and `f` undefined (they only existed inside a commented-out block),
    # raising a NameError, and it also omitted the mandatory dataSource and
    # tar arguments of featChangePoint(dataSource, ref, tar, ...).
    ref = [
        'H710C_311C_0'
    ]
    tar = [
        'H710C_312A+B_0'
    ]
    for r, t in zip(ref, tar):
        # assumes the per-size directory holds info/ and maskedSamples/ —
        # TODO confirm the exact layout before running
        featChangePoint(dataSource + str(size) + "/", r, t, ts = 4)

    # featSelectArea(dataSource, size, 2, 0, False)
{
"api_name": "matplotlib.use",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": ... |
6648609464 | import logging
import math
import numpy as np
import util.annotation
import util.metadata_service
import util.provenance_metadata_store
from util.annotation import AnnotationStore
from engine import app
from ooi_data.postgres.model import Parameter, Stream, NominalDepth
from util.asset_management import AssetManagement
from util.cass import fetch_l0_provenance
from util.common import log_timing, StreamEngineException, StreamKey, MissingDataException, read_size_config, \
isfillvalue, QC_SUFFIXES
from util.metadata_service import build_stream_dictionary, get_available_time_range
from util.qc_executor import QcExecutor
from util.qartod_qc_executor import QartodQcExecutor
from util.stream_dataset import StreamDataset
# Module-level logger and configuration values pulled from the engine app
# config.  Entries read with .get() may be None when unset; DEPTH_FILL and
# PRESSURE_DEPTH_APPLICABLE_STREAM_KEYS are mandatory (indexed directly).
log = logging.getLogger()

PRESSURE_DPI = app.config.get('PRESSURE_DPI')
GPS_STREAM_ID = app.config.get('GPS_STREAM_ID')
GPS_LAT_PARAM_ID = app.config.get('GPS_LAT_PARAM_ID')
GPS_LON_PARAM_ID = app.config.get('GPS_LON_PARAM_ID')
LAT_PARAM_ID = app.config.get('LAT_PARAM_ID')
LON_PARAM_ID = app.config.get('LON_PARAM_ID')
INTERP_LAT_PARAM_ID = app.config.get('INTERP_LAT_PARAM_ID')
INTERP_LON_PARAM_ID = app.config.get('INTERP_LON_PARAM_ID')
PRESSURE_DEPTH_PARAM_ID = app.config.get('PRESSURE_DEPTH_PARAM_ID')
INT_PRESSURE_NAME = app.config.get('INT_PRESSURE_NAME')
MAX_DEPTH_VARIANCE = app.config.get('MAX_DEPTH_VARIANCE')
MAX_DEPTH_VARIANCE_METBK = app.config.get('MAX_DEPTH_VARIANCE_METBK')
ASSET_HOST = app.config.get('ASSET_HOST')
# per-stream size estimates loaded from the size config file
SIZE_ESTIMATES = read_size_config(app.config.get('SIZE_CONFIG'))
DEFAULT_PARTICLE_DENSITY = app.config.get('PARTICLE_DENSITY', 1000)  # default bytes/particle estimate
SECONDS_PER_BYTE = app.config.get('SECONDS_PER_BYTE', 0.0000041)  # default bytes/sec estimate
MINIMUM_REPORTED_TIME = app.config.get('MINIMUM_REPORTED_TIME')
DEPTH_FILL = app.config['DEPTH_FILL']
# stream-key patterns for which the native pressure_depth parameter is usable
PRESSURE_DEPTH_APPLICABLE_STREAM_KEYS = app.config['PRESSURE_DEPTH_APPLICABLE_STREAM_KEYS']
DEPTH_PARAMETER_NAME = app.config.get('DEPTH_PARAMETER_NAME')
class StreamRequest(object):
"""
Stores the information from a request, and calculates the required
parameters and their streams
"""
    def __init__(self, stream_key, parameters, time_range, uflags, qc_parameters=None,
                 limit=None, include_provenance=False, include_annotations=False, strict_range=False,
                 request_id='', collapse_times=False, execute_dpa=True, require_deployment=True):
        """
        :param stream_key: StreamKey of the primary requested stream (required)
        :param parameters: requested parameters (empty/None semantics handled
            downstream in _initialize — TODO confirm)
        :param time_range: requested time range
        :param uflags: user flags passed through to each StreamDataset
        :param qc_parameters: configuration for the standard QC executor
        :param limit: optional cap on the number of particles fetched
        :param include_provenance: attach provenance metadata when True
        :param include_annotations: attach annotations when True
        :param strict_range: enforce the exact requested time range
        :param request_id: identifier used for log correlation
        :param collapse_times: collapse the time range to available data
        :param execute_dpa: run data product algorithms when True
        :param require_deployment: exclude data outside deployments when True
        :raises StreamEngineException: if stream_key is not a StreamKey
        """
        if not isinstance(stream_key, StreamKey):
            raise StreamEngineException('Received no stream key', status_code=400)

        # Inputs
        self.request_id = request_id
        self.stream_key = stream_key
        self.requested_parameters = parameters
        self.time_range = time_range
        self.uflags = uflags
        self.qc_executor = QcExecutor(qc_parameters, self)
        self.qartod_qc_executor = QartodQcExecutor(self)
        self.limit = limit
        self.include_provenance = include_provenance
        self.include_annotations = include_annotations
        self.strict_range = strict_range
        self.execute_dpa = execute_dpa
        self.require_deployment = require_deployment

        # Internals
        self.asset_management = AssetManagement(ASSET_HOST, request_id=self.request_id)
        # map of StreamKey -> parameters to fetch from that stream
        self.stream_parameters = {}
        self.unfulfilled = set()
        # map of StreamKey -> StreamDataset, populated by fetch_raw_data
        self.datasets = {}
        # map of StreamKey -> set of external parameters to interpolate in
        self.external_includes = {}
        self.annotation_store = AnnotationStore()

        self._initialize()

        if collapse_times:
            self._collapse_times()
def __repr__(self):
return str(self.__dict__)
@property
def needs_cc(self):
"""
Return the list of calibration coefficients necessary to compute all data products for this request
:return:
"""
stream_list = []
for sk in self.stream_parameters:
needs = list(sk.stream.needs_cc)
d = sk.as_dict()
d['coefficients'] = needs
stream_list.append(d)
return stream_list
    @log_timing(log)
    def fetch_raw_data(self):
        """
        Fetch the source data for this request: asset-management events, raw
        instrument data for every non-virtual stream, and empty placeholder
        datasets for virtual streams.  Applies annotation/deployment masks and
        prunes empty or inapplicable data afterwards.
        :raises MissingDataException: if the primary (or source) stream has no data
        :return:
        """
        # Start fetching calibration data from Asset Management
        am_events = {}
        am_futures = {}
        for stream_key in self.stream_parameters:
            refdes = '-'.join((stream_key.subsite, stream_key.node, stream_key.sensor))
            am_futures[stream_key] = self.asset_management.get_events_async(refdes)
        # Resolve calibration data futures and attach to instrument data
        for stream_key in am_futures:
            events = am_futures[stream_key].result()
            am_events[stream_key] = events
        # Start fetching instrument data
        for stream_key, stream_parameters in self.stream_parameters.iteritems():
            other_streams = set(self.stream_parameters)
            other_streams.remove(stream_key)
            # Pull a data point on either side of the requested time range only for supporting streams
            # to improve interpolation
            should_pad = stream_key != self.stream_key
            if not stream_key.is_virtual:
                log.debug('<%s> Fetching raw data for %s', self.request_id, stream_key.as_refdes())
                sd = StreamDataset(stream_key, self.uflags, other_streams, self.request_id)
                sd.events = am_events[stream_key]
                try:
                    sd.fetch_raw_data(self.time_range, self.limit, should_pad)
                    self.datasets[stream_key] = sd
                except MissingDataException as e:
                    # missing data is fatal only for the primary and source streams
                    if stream_key == self.stream_key:
                        raise MissingDataException("Query returned no results for primary stream")
                    elif stream_key.stream in self.stream_key.stream.source_streams:
                        raise MissingDataException("Query returned no results for source stream")
                    else:
                        log.error('<%s> %s', self.request_id, e.message)
            else:
                # virtual streams have no raw data; they are derived later
                log.debug('<%s> Creating empty dataset for virtual stream: %s',
                          self.request_id, stream_key.as_refdes())
                sd = StreamDataset(stream_key, self.uflags, other_streams, self.request_id)
                sd.events = am_events[stream_key]
                self.datasets[stream_key] = sd
        self._exclude_flagged_data()
        self._exclude_nondeployed_data()
        # Verify data still exists after masking virtual
        message = 'Query returned no results for %s stream (due to deployment or annotation mask)'
        if self.stream_key.is_virtual:
            found_streams = [stream.stream for stream in self.datasets
                             if self.datasets[stream]]
            if not any(stream in self.stream_key.stream.source_streams for stream in found_streams):
                raise MissingDataException(message % 'source')
        # real
        else:
            primary_stream_dataset = self.datasets[self.stream_key]
            if not primary_stream_dataset.datasets:
                raise MissingDataException(message % 'primary')
        # Remove any empty, non-virtual supporting datasets
        for stream_key in list(self.datasets):
            if not stream_key.is_virtual:
                if not self.datasets[stream_key].datasets:
                    del self.datasets[stream_key]

        # Remove pressure_depth if it is not applicable to prevent misguided uses of rubbish
        # pressure_depth data when pressure should be interpolated from the CTD stream
        for stream_key in list(self.datasets):
            if not self._is_pressure_depth_valid(stream_key) and self.datasets[stream_key].datasets:
                for _, ds in self.datasets[stream_key].datasets.iteritems():
                    pressure_depth = Parameter.query.get(PRESSURE_DEPTH_PARAM_ID)
                    if pressure_depth.name in ds:
                        del ds[pressure_depth.name]
@staticmethod
def missing_params_of_dataset_depend_on_another(dataset, another):
if not (dataset.missing and another.missing):
return False
missing_param_dependencies = set()
for param_dependencies_dict in dataset.missing.values():
for param_dependencies in param_dependencies_dict.values():
for dependency in param_dependencies.values():
missing_param_dependencies.add(dependency)
for param_list in another.params.values():
for param in param_list:
if (another.stream_key.stream, param) in missing_param_dependencies:
return True
return False
@staticmethod
def compare_datasets_by_missing(ds1, ds2):
if ds1.stream_key.is_virtual:
return 1
if ds2.stream_key.is_virtual:
return -1
if StreamRequest.missing_params_of_dataset_depend_on_another(ds1, ds2):
return 1
if StreamRequest.missing_params_of_dataset_depend_on_another(ds2, ds1):
return -1
return 0
    def calculate_derived_products(self):
        """Compute all derived data products in dependency order: internal
        products first, then products needing non-virtual external inputs,
        then virtual streams, then products needing virtual inputs."""
        # Calculate all internal-only data products
        for sk in self.datasets:
            if not sk.is_virtual:
                self.datasets[sk].calculate_all(ignore_missing_optional_params=False)
        # Sort the datasets in case a derived parameter requires an another external parameter to be calculated first
        sorted_datasets = sorted(self.datasets.values(), cmp=StreamRequest.compare_datasets_by_missing)
        sorted_stream_keys = [ds.stream_key for ds in sorted_datasets]
        # Allow each StreamDataset to interpolate any needed non-virtual parameters from the other datasets
        # Then calculate any data products which required only non-virtual external input.
        for sk in sorted_stream_keys:
            if not sk.is_virtual:
                self.datasets[sk].interpolate_needed(self.datasets, interpolate_virtual=False)
                self.datasets[sk].calculate_all(ignore_missing_optional_params=True)
        # Derive each virtual stream from the first matching source stream
        for sk in self.datasets:
            if sk.is_virtual:
                for poss_source in self.datasets:
                    if poss_source.stream in sk.stream.source_streams:
                        self.datasets[sk].calculate_virtual(self.datasets[poss_source])
                        break
        # Allow each StreamDataset to interpolate any needed virtual parameters from the other datasets
        # Then calculate any data products which required virtual external input.
        for sk in self.datasets:
            if not sk.is_virtual:
                self.datasets[sk].interpolate_needed(self.datasets, interpolate_virtual=True)
                self.datasets[sk].calculate_all()
        # Replace any still-uncomputable products with fill values
        for sk in self.datasets:
            self.datasets[sk].fill_missing()
    def execute_qc(self):
        """Run the standard QC checks (delegates to _run_qc)."""
        self._run_qc()

    def execute_qartod_qc(self):
        """Run the QARTOD QC tests (delegates to _run_qartod_qc)."""
        self._run_qartod_qc()

    def insert_provenance(self):
        """Attach provenance metadata and location data to the datasets."""
        self._insert_provenance()
        self._add_location()
    @log_timing(log)
    def _run_qc(self):
        """Execute the standard QC checks for every parameter of every
        fetched dataset (per deployment)."""
        # execute any QC
        for sk, stream_dataset in self.datasets.iteritems():
            for param in sk.stream.parameters:
                for dataset in stream_dataset.datasets.itervalues():
                    self.qc_executor.qc_check(param, dataset)

    @log_timing(log)
    def _run_qartod_qc(self):
        """Execute the configured QARTOD tests across all datasets."""
        self.qartod_qc_executor.execute_qartod_tests()
    # noinspection PyTypeChecker
    def _insert_provenance(self):
        """
        Insert all source provenance for this request. This is dependent on the data already having been fetched.
        :return:
        """
        if self.include_provenance:
            for stream_key in self.stream_parameters:
                if stream_key in self.datasets:
                    self.datasets[stream_key].insert_instrument_attributes()
                    prov_metadata = self.datasets[stream_key].provenance_metadata
                    prov_metadata.add_query_metadata(self, self.request_id, 'JSON')
                    # only record deployment events which contributed data
                    prov_metadata.add_instrument_provenance(
                        stream_key,
                        [deployment_event for deployment_event in self.datasets[stream_key].events.events
                         if deployment_event["deploymentNumber"] in self.datasets[stream_key].datasets.keys()])
                    # attach L0 provenance for every deployment carrying it
                    for deployment, dataset in self.datasets[stream_key].datasets.iteritems():
                        if 'provenance' in dataset:
                            provenance = dataset.provenance.values.astype('str')
                            prov = fetch_l0_provenance(stream_key, provenance, deployment)
                            prov_metadata.update_provenance(prov, deployment)
def insert_annotations(self):
"""
Insert all annotations for this request.
"""
for stream_key in self.stream_parameters:
self.annotation_store.add_query_annotations(stream_key, self.time_range)
def _exclude_flagged_data(self):
"""
Exclude data from datasets based on annotations
TODO: Future optimization, avoid querying excluded data when possible
:return:
"""
for stream_key, stream_dataset in self.datasets.iteritems():
stream_dataset.exclude_flagged_data(self.annotation_store)
def _exclude_nondeployed_data(self):
"""
Exclude data from datasets that are outside of deployment dates
:return:
"""
for stream_key, stream_dataset in self.datasets.iteritems():
stream_dataset.exclude_nondeployed_data(self.require_deployment)
def _is_pressure_depth_valid(self, stream_key):
"""
Returns true if the stream key corresponds to an instrument which should use pressure_depth instead of
int_ctd_pressure. Many streams have a pressure_depth parameter which is filled with unusable data. This
function handles determining when the pressure_depth parameter is usable based on a lookup.
"""
stream_key = stream_key.as_dict()
for candidate_key in PRESSURE_DEPTH_APPLICABLE_STREAM_KEYS:
# ignore fields in candidate_key which are set to None as None means wildcard
fields_to_match = {k: candidate_key[k] for k in candidate_key if candidate_key[k] != None}
# compute the difference in the non-None fields
mismatch = {k: stream_key[k] for k in fields_to_match if stream_key[k] != candidate_key[k]}
if not mismatch:
return True
return False
    def import_extra_externals(self):
        """Interpolate all requested external parameters into every dataset,
        then integrate CTD pressure (renamed to INT_PRESSURE_NAME) and the
        depth derived from it into the primary stream."""
        # import any other required "externals" into all datasets
        for source_sk in self.external_includes:
            if source_sk in self.datasets:
                for param in self.external_includes[source_sk]:
                    for target_sk in self.datasets:
                        self.datasets[target_sk].interpolate_into(source_sk, self.datasets[source_sk], param)
        # determine if there is a pressure parameter available (9328) - should be none when _is_pressure_depth_valid evaluates to True
        pressure_params = [(sk, param) for sk in self.external_includes for param in self.external_includes[sk]
                           if param.data_product_identifier == PRESSURE_DPI]
        if not pressure_params:
            return
        # integrate the pressure parameter into the stream
        pressure_key, pressure_param = pressure_params.pop()
        pressure_name = '-'.join((pressure_key.stream.name, pressure_param.name))
        if pressure_key not in self.datasets:
            return
        # interpolate CTD pressure
        self.datasets[self.stream_key].interpolate_into(pressure_key, self.datasets.get(pressure_key), pressure_param)
        for deployment in self.datasets[self.stream_key].datasets:
            ds = self.datasets[self.stream_key].datasets[deployment]
            # If we used the CTD pressure, then rename it to the configured final name (e.g. 'int_ctd_pressure')
            if pressure_name in ds.data_vars:
                pressure_value = ds.data_vars[pressure_name]
                del ds[pressure_name]
                pressure_value.name = INT_PRESSURE_NAME
                self.datasets[self.stream_key].datasets[deployment][INT_PRESSURE_NAME] = pressure_value
        # determine if there is a depth parameter available
        # depth is computed from pressure, so look for it in the same stream
        depth_key, depth_param = self.find_stream(self.stream_key, tuple(Parameter.query.filter(Parameter.name == DEPTH_PARAMETER_NAME)), pressure_key.stream)
        if not depth_param:
            return
        if depth_key not in self.datasets:
            return
        # update external_includes for any post processing that looks at it - pressure was already handled, but depth was not
        self.external_includes.setdefault(depth_key, set()).add(depth_param)
        # interpolate depth computed from CTD pressure
        self.datasets[self.stream_key].interpolate_into(depth_key, self.datasets.get(depth_key), depth_param)
def rename_parameters(self):
    """
    Some internal parameters are not well suited for output data files (e.g. NetCDF). To get around this, the
    Parameter class has a netcdf_name attribute for use in output files. This function performs the translations
    from internal name (Parameter.name) to output name (Parameter.netcdf_name).
    """
    # build a mapping from original parameter name to netcdf_name
    # (only parameters whose names actually differ need an entry)
    parameter_name_map = {x.name: x.netcdf_name for x in self.requested_parameters if x.netcdf_name != x.name}
    for external_stream_key in self.external_includes:
        for parameter in [x for x in self.external_includes[external_stream_key] if x.netcdf_name != x.name]:
            long_parameter_name = external_stream_key.stream_name + "-" + parameter.name
            # netcdf_generator.py is expecting the long naming scheme
            parameter_name_map[long_parameter_name] = external_stream_key.stream_name + "-" + parameter.netcdf_name
            #make sure netcdf_name parameter is used for co-located CTD's (15486)
            parameter_name_map[parameter.name] = parameter.netcdf_name
    # pass the parameter mapping to the annotation store for renaming there
    if self.include_annotations:
        self.annotation_store.rename_parameters(parameter_name_map)
    # generate possible qc/qartod renamings too so they will be handled in the update loop below
    qartod_name_map = {}
    for suffix in ['_qc_executed', '_qc_results', '_qartod_executed', '_qartod_results']:
        qartod_name_map.update({name + suffix: netcdf_name + suffix for name, netcdf_name in
                                parameter_name_map.iteritems()})
    parameter_name_map.update(qartod_name_map)
    # update parameter names in every deployment dataset of every stream
    for stream_key, stream_dataset in self.datasets.iteritems():
        for deployment, ds in stream_dataset.datasets.iteritems():
            for key in [x for x in parameter_name_map.keys() if x in ds]:
                # add an attribute to help users associate the renamed variable with its original name
                ds[key].attrs['alternate_parameter_name'] = key
                # rename
                # NOTE(review): xarray deprecated rename(..., inplace=True) in newer
                # releases — confirm the pinned xarray version still supports it
                ds.rename({key: parameter_name_map[key]}, inplace=True)
def _add_location(self):
    """Attach location data to every stream dataset in this request."""
    log.debug('<%s> Inserting location data for all datasets', self.request_id)
    for stream_ds in self.datasets.itervalues():
        stream_ds.add_location()
def _locate_externals(self, parameters):
    """
    Locate external data sources for the given list of parameters
    :param parameters: list of type Parameter
    :return: found parameters as dict(StreamKey, Parameter), unfulfilled parameters as set(Parameter)
    """
    log.debug('<%s> _locate_externals: %r', self.request_id, parameters)
    # A set of tuples of the dependant stream and the required parameters that it depends on.
    # Initially, it is just the requested stream and external parameters that it needs.
    external_to_process = set()
    for param in parameters:
        external_to_process.add((self.stream_key, param))

    found = {}
    external_unfulfilled = set()
    stream_parameters = {}

    def process_found_stream(stream_key, parameter):
        """
        Internal subroutine to process each found stream/parameter
        :param stream_key: StreamKey found by find_stream
        :param parameter: Parameter inside found stream
        :return: None
        """
        found.setdefault(stream_key, set()).add(parameter)
        sk_needs_internal = stream_key.stream.needs_internal([parameter])
        sk_needs_external = stream_key.stream.needs_external([parameter])
        log.debug('<%s> _locate_externals FOUND INT: %r %r', self.request_id,
                  stream_key.as_refdes(), sk_needs_internal)
        log.debug('<%s> _locate_externals FOUND EXT: %r %r', self.request_id,
                  stream_key.as_refdes(), sk_needs_external)

        # Add externals not yet processed to the to_process set
        # (transitive dependencies of the parameter we just resolved)
        for sub_need in sk_needs_external:
            if sub_need not in external_unfulfilled:
                external_to_process.add((stream_key, sub_need))
        # Add internal parameters to the corresponding stream set
        stream_parameters.setdefault(stream_key, set()).update(sk_needs_internal)

    # Worklist loop: drain the queue, resolving each external need or recording
    # it as unfulfilled; process_found_stream may enqueue new work.
    while external_to_process:
        # Pop an external from the list of externals to process
        stream_key, external = external_to_process.pop()
        stream, poss_params = external
        # all non-virtual streams define PD7, skip
        if poss_params[0].id == 7:
            continue
        log.debug('<%s> _locate_externals: STREAM: %r POSS_PARAMS: %r', self.request_id, stream, poss_params)
        found_sk, found_param = self.find_stream(stream_key, poss_params, stream=stream)
        if found_sk:
            process_found_stream(found_sk, found_param)
        else:
            external_unfulfilled.add(external)

    return stream_parameters, found, external_unfulfilled
@log_timing(log)
def _get_mobile_externals(self):
    """
    For mobile assets, build the set of externals necessary to provide location data
    :return: set((Stream, (Parameter,)))
    """
    externals = set()
    if self.stream_key.is_mobile and not self._is_pressure_depth_valid(self.stream_key):
        # Request a pressure source. Depth is deliberately NOT requested here:
        # it must come from the same (not-yet-determined) stream as pressure.
        pressure_params = tuple(Parameter.query.filter(
            Parameter.data_product_identifier == PRESSURE_DPI).all())
        externals.add((None, pressure_params))

    if self.stream_key.is_glider:
        # Gliders additionally need GPS-derived position parameters.
        gps_stream = Stream.query.get(GPS_STREAM_ID)
        for param_id in (GPS_LAT_PARAM_ID, GPS_LON_PARAM_ID,
                         LAT_PARAM_ID, LON_PARAM_ID,
                         INTERP_LAT_PARAM_ID, INTERP_LON_PARAM_ID):
            externals.add((gps_stream, (Parameter.query.get(param_id),)))
    return externals
@log_timing(log)
def _initialize(self):
    """
    Initialize stream request. Computes data sources / parameters
    :return:
    """
    # Build our list of internally requested parameters. Always work on a copy:
    # the original code bound the stream's own parameter list in the else branch,
    # so the remove() below mutated the shared Stream object.
    if self.requested_parameters:
        internal_requested = [p for p in self.stream_key.stream.parameters if p.id in self.requested_parameters]
    else:
        internal_requested = list(self.stream_key.stream.parameters)

    # Drop pressure_depth (and depth derived from it) when it is not valid for
    # this stream; a CTD-based pressure source will be resolved instead.
    pressure_depth = Parameter.query.get(PRESSURE_DEPTH_PARAM_ID)
    if pressure_depth in internal_requested and not self._is_pressure_depth_valid(self.stream_key):
        log.debug('<%s> removing invalid pressure_depth from requested parameters', self.request_id)
        internal_requested.remove(pressure_depth)

        log.debug('<%s> removing invalid depth computed from invalid pressure_depth from requested parameters', self.request_id)
        # Rebuild the list instead of remove()-ing while iterating, which
        # skipped the element following each removal.
        internal_requested = [p for p in internal_requested if p.name != DEPTH_PARAMETER_NAME]

    self.requested_parameters = internal_requested

    # Identify internal parameters needed to support this query
    primary_internals = self.stream_key.stream.needs_internal(internal_requested)
    log.debug('<%s> primary stream internal needs: %r', self.request_id, primary_internals)
    self.stream_parameters[self.stream_key] = primary_internals

    if self.execute_dpa:
        # Identify external parameters needed to support this query
        external_to_process = self.stream_key.stream.needs_external(internal_requested)
        log.debug('<%s> primary stream external needs: %r', self.request_id, external_to_process)
        if external_to_process:
            stream_parameters, found, external_unfulfilled = self._locate_externals(external_to_process)
            for sk in stream_parameters:
                self.stream_parameters.setdefault(sk, set()).update(stream_parameters[sk])
            self.unfulfilled = external_unfulfilled
            for sk in found:
                self.external_includes.setdefault(sk, set()).update(found[sk])

        # Now identify any parameters needed for mobile assets
        external_to_process = self._get_mobile_externals()
        if external_to_process:
            stream_parameters, found, external_unfulfilled = self._locate_externals(external_to_process)
            for sk in stream_parameters:
                self.stream_parameters.setdefault(sk, set()).update(stream_parameters[sk])
            self.unfulfilled = self.unfulfilled.union(external_unfulfilled)
            for sk in found:
                self.external_includes.setdefault(sk, set()).update(found[sk])

    if self.unfulfilled:
        log.warn('<%s> Unable to find sources for the following params: %r',
                 self.request_id, self.unfulfilled)
@log_timing(log)
def _collapse_times(self):
    """
    Collapse request times to match available data
    :return:
    """
    if self.stream_key.is_virtual:
        # Virtual streams have no data of their own: collapse to the smallest
        # window covered by all non-virtual source streams.
        tr = self.time_range.copy()
        for sk in self.stream_parameters:
            if sk.is_virtual:
                continue
            tr = tr.collapse(get_available_time_range(sk))
        new_time_range = self.time_range.collapse(tr)
        if new_time_range != self.time_range:
            log.info('<%s> Collapsing requested time range: %s to available time range: %s',
                     self.request_id, self.time_range, new_time_range)
            self.time_range = new_time_range
    else:
        # collapse to primary stream's available data window
        new_time_range = self.time_range.collapse(get_available_time_range(self.stream_key))
        if new_time_range != self.time_range:
            log.info('<%s> Collapsing requested time range: %s to available time range: %s',
                     self.request_id, self.time_range, new_time_range)
            self.time_range = new_time_range
@log_timing(log)
def find_stream(self, stream_key, poss_params, stream=None):
    """Find a source for one of poss_params, searched in strict priority order:
    (1) same sensor, (2) co-located instrument at the same nominal depth,
    (3) same node, (4) nearby instrument within the allowed depth variance.

    :param stream_key: StreamKey of the stream needing the parameter
    :param poss_params: candidate Parameters, tried in order at each tier
    :param stream: optional Stream to restrict the search to
    :return: (StreamKey, Parameter) on success, (None, None) otherwise
    """
    log.debug('find_stream(%r, %r, %r)', stream_key, poss_params, stream)
    subsite = stream_key.subsite
    node = stream_key.node
    sensor = stream_key.sensor
    stream_dictionary = build_stream_dictionary()

    # For each candidate parameter, the list of stream names that can supply it
    # (restricted to the caller-supplied stream when given).
    param_streams = []
    for p in poss_params:
        if stream is None:
            param_streams.append((p, [s.name for s in p.streams]))
        else:
            param_streams.append((p, [stream.name]))

    # First, try to find the stream on the same sensor
    for param, search_streams in param_streams:
        sk = self._find_stream_same_sensor(stream_key, search_streams, stream_dictionary)
        if sk:
            return sk, param

    # Attempt to find an instrument at the same depth (if not mobile)
    if not stream_key.is_mobile:
        nominal_depth = NominalDepth.get_nominal_depth(subsite, node, sensor)
        if nominal_depth is not None:
            co_located = nominal_depth.get_colocated_subsite()
            for param, search_streams in param_streams:
                sk = self._find_stream_from_list(stream_key, search_streams, co_located, stream_dictionary)
                if sk:
                    return sk, param

    # Attempt to find an instrument on the same node
    for param, search_streams in param_streams:
        sk = self._find_stream_same_node(stream_key, search_streams, stream_dictionary)
        if sk:
            return sk, param

    # Not found at same depth, attempt to find nearby (if not mobile)
    if not stream_key.is_mobile:
        nominal_depth = NominalDepth.get_nominal_depth(subsite, node, sensor)
        if nominal_depth is not None:
            # METBK instruments get a wider depth-variance allowance
            max_depth_var = MAX_DEPTH_VARIANCE_METBK if 'METBK' in sensor else MAX_DEPTH_VARIANCE
            nearby = nominal_depth.get_depth_within(max_depth_var)
            for param, search_streams in param_streams:
                sk = self._find_stream_from_list(stream_key, search_streams, nearby, stream_dictionary)
                if sk:
                    return sk, param

    return None, None
@staticmethod
def _find_stream_same_sensor(stream_key, streams, stream_dictionary):
    """
    Given a primary source, attempt to find one of the supplied streams from the same instrument
    :param stream_key: StreamKey identifying the primary source
    :param streams: candidate stream names, tried in order
    :return: StreamKey if found, otherwise None
    """
    log.debug('_find_stream_same_sensor(%r, %r, STREAM_DICTIONARY)', stream_key, streams)
    method = stream_key.method
    subsite = stream_key.subsite
    node = stream_key.node
    sensor = stream_key.sensor

    # Search the exact same reference designator for each candidate stream.
    for candidate in streams:
        available_sensors = (stream_dictionary.get(candidate, {})
                             .get(method, {})
                             .get(subsite, {})
                             .get(node, []))
        if sensor in available_sensors:
            return StreamKey.from_dict({
                "subsite": subsite,
                "node": node,
                "sensor": sensor,
                "method": method,
                "stream": candidate
            })
@staticmethod
def _find_stream_from_list(stream_key, streams, sensors, stream_dictionary):
    """Search the given co-located/nearby sensors for one of the candidate streams."""
    log.debug('_find_stream_from_list(%r, %r, %r, STREAM_DICTIONARY)', stream_key, streams, sensors)
    subsite = stream_key.subsite
    # Allowed (subsite, node, sensor) designators derived from the sensor list.
    designators = {(c.subsite, c.node, c.sensor) for c in sensors}

    for candidate_stream in streams:
        for candidate_method in StreamRequest._get_potential_methods(stream_key.method, stream_dictionary):
            subsite_dict = stream_dictionary.get(candidate_stream, {}).get(candidate_method, {}).get(subsite, {})
            for _node in subsite_dict:
                for _sensor in subsite_dict[_node]:
                    if (subsite, _node, _sensor) in designators:
                        return StreamKey.from_dict({
                            "subsite": subsite,
                            "node": _node,
                            "sensor": _sensor,
                            "method": candidate_method,
                            "stream": candidate_stream
                        })
@staticmethod
def _find_stream_same_node(stream_key, streams, stream_dictionary):
    """
    Given a primary source, attempt to find one of the supplied streams from the same instrument,
    same node or same subsite
    :param stream_key: StreamKey - defines the source of the primary stream
    :param streams: List - list of target streams
    :return: StreamKey if found, otherwise None
    """
    log.debug('_find_stream_same_node(%r, %r, STREAM_DICTIONARY)', stream_key, streams)
    subsite = stream_key.subsite
    node = stream_key.node

    for candidate_stream in streams:
        for candidate_method in StreamRequest._get_potential_methods(stream_key.method, stream_dictionary):
            sensors = (stream_dictionary.get(candidate_stream, {})
                       .get(candidate_method, {})
                       .get(subsite, {})
                       .get(node, []))
            if sensors:
                # Take the first available sensor on this node.
                return StreamKey.from_dict({
                    "subsite": subsite,
                    "node": node,
                    "sensor": sensors[0],
                    "method": candidate_method,
                    "stream": candidate_stream
                })
@staticmethod
def _get_potential_methods(method, stream_dictionary):
"""
When trying to resolve streams, an applicable stream may have a subtlely different method
(e.g. 'recovered_host' vs. 'recovered_inst'). This function is used to identify all related methods
within a stream dictionary so that streams can be resolved properly despite these minor differences.
"""
method_category = None
if "streamed" in method:
method_category = "streamed"
elif "recovered" in method:
method_category = "recovered"
elif "telemetered" in method:
method_category = "telemetered"
if not method_category:
log.warn("<%s> Unexpected method, %s, encountered during stream resolution."
" Only resolving streams whose methods match exactly.", method)
return method
valid_methods = []
for stream in stream_dictionary:
for method in stream_dictionary[stream]:
if method_category in method and "bad" not in method:
valid_methods.append(method)
return valid_methods
def interpolate_from_stream_request(self, stream_request):
    """Interpolate another request's requested parameters into this request's dataset."""
    source_sk = stream_request.stream_key
    target_sk = self.stream_key
    # Guard clause: both sides must have data before interpolating.
    if source_sk not in stream_request.datasets or target_sk not in self.datasets:
        return
    source_dataset = stream_request.datasets[source_sk]
    for param in stream_request.requested_parameters:
        self.datasets[target_sk].interpolate_into(source_sk, source_dataset, param)
        self.external_includes.setdefault(source_sk, set()).add(param)
def compute_request_size(self, size_estimates=SIZE_ESTIMATES):
    """
    Estimate the time and size of a NetCDF request based on previous data.
    :param size_estimates: dictionary containing size estimates for each stream
    :return: size estimate (in bytes)
    """
    total = 0
    for stream in self.stream_parameters:
        # bytes-per-particle estimate, falling back to the default density
        per_particle = size_estimates.get(stream.stream_name, DEFAULT_PARTICLE_DENSITY)
        total += per_particle * util.metadata_service.get_particle_count(stream, self.time_range)
    return int(math.ceil(total))
@staticmethod
def compute_request_time(file_size):
    """Estimate processing time (seconds) from the projected file size, floored at the minimum."""
    estimated_seconds = file_size * SECONDS_PER_BYTE
    return max(MINIMUM_REPORTED_TIME, estimated_seconds)
| oceanobservatories/stream_engine | util/stream_request.py | stream_request.py | py | 36,193 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "engine.app.config.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "engine.app.config",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "engine... |
34424513905 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author : Nam Zeng
# @Time : 2018/12/6 10:38
# @Desc : 用牛顿插值法拟合经过给定点的曲线
# x0 | y0
# x1 | y1 y10
# x2 | y2 y21 y210
import matplotlib.pyplot as plt
import numpy as np
def newton_interpolate(x, y):
    """
    Build the Newton divided-difference table and return its diagonal.

    :param x: x coordinates of the sample points
    :param y: y coordinates of the sample points
    :return: list of Newton-form coefficients [b0, ..., bn-1]
    """
    n = len(x)
    table = np.zeros((n, n), dtype=float)
    # Column 0 holds the raw y values.
    for row in range(n):
        table[row][0] = y[row]
    # Each later column holds divided differences of the previous one
    # (filling the lower triangle from the hypotenuse toward the legs).
    for col in range(1, n):
        for row in range(col, n):
            table[row][col] = (table[row][col - 1] - table[row - 1][col - 1]) / (x[row] - x[row - col])
    # The diagonal entries are the Newton coefficients.
    return [table[i][i] for i in range(n)]
def draw(x, y, coe):
    """Plot the sample points and the fitted Newton polynomial.

    Generalized: the original hard-coded exactly four coefficients (a cubic),
    breaking for any other node count returned by newton_interpolate. The
    polynomial is now evaluated incrementally for any degree.
    """
    plt.title("Newton Interpolation")
    plt.scatter(x, y, label="discrete data", color='green')
    ltx = np.linspace(-5, 5, 300)
    lty = [_newton_eval(coe, x, xi) for xi in ltx]
    plt.plot(ltx, lty, label="fitting curve", color='red')
    plt.legend(loc="upper right")
    plt.show()


def _newton_eval(coe, nodes, xi):
    # Evaluate sum_k coe[k] * prod_{j<k} (xi - nodes[j]) by accumulating
    # the Newton basis product term by term.
    value = 0.0
    basis = 1.0
    for k in range(len(coe)):
        value += coe[k] * basis
        basis *= (xi - nodes[k])
    return value
if __name__ == '__main__':
    # Sample data: fit a cubic Newton polynomial through four points.
    sample_x = [3, 2, -1, -2]
    sample_y = [-23, -4, 5, 12]
    coefficients = newton_interpolate(sample_x, sample_y)
    print("拟合曲线的系数(b0~bn-1):\n", coefficients)
    draw(sample_x, sample_y, coefficients)
| NAMZseng/numerical-analysis | 5_newton_Interpolation.py | 5_newton_Interpolation.py | py | 1,620 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pypl... |
#!/usr/bin/python
# Lab script: low-pass filter a 500 Hz fetal-heart-rate recording and compare
# heart-rate estimates (via heartpy) before and after filtering.
import sys
import numpy as np
import scipy.signal as signal
from scipy.io import wavfile
import sklearn as sk
import librosa
import heartpy as hp
import soundfile as sf
from scipy.fft import fft, ifft, fftfreq
import matplotlib.pyplot as plt

# Load the recording; sr=None keeps the file's native sample rate.
src_signal = librosa.load("LAB5_500HzFHR.wav", sr=None)
sample_rate = src_signal[1]
source_signal = src_signal[0]
N = source_signal.size
Ts = 1.0 / sample_rate  # sampling period in seconds

# Single-sided amplitude spectrum of the raw signal.
fft_src = fft(source_signal)
yf = 2.0/N * np.abs(fft_src[:N//2])
x = np.linspace(0.0, N*Ts, N)           # time axis (seconds)
xf = np.linspace(0.0, 1.0/(2.0*Ts), N//2)  # frequency axis up to Nyquist

# IIR low-pass design: 1 Hz passband edge, 3 Hz stopband edge, 1 dB passband
# ripple, 40 dB stopband attenuation.
# NOTE(review): presumably intended to isolate the heart-beat band — confirm edges.
b, a = signal.iirdesign(1, 3, gpass=1, gstop=40, fs=sample_rate)
signal_filtered = signal.lfilter(b, a, source_signal)
freq_s = fftfreq(N, Ts)[:N//2]
fft_flt = fft(signal_filtered)

# Raw-signal spectrum.
plt.plot(freq_s, yf)
plt.show(block=False)
# print(b)
# print(a)
# Filtered-signal spectrum.
plt.plot(freq_s, 2.0/N * np.abs(fft_flt[0:N//2]))
plt.show(block=False)
# Filtered signal in the time domain.
plt.plot(x, signal_filtered)
plt.show(block=False)
# print(signal_filtered)

# Save the filtered waveform for listening/inspection.
sf.write("test.wav", signal_filtered, sample_rate)

# Heart-rate analysis of raw vs. filtered signals (500.0 = sample rate in Hz).
working_data, measures = hp.process(source_signal, 500.0)
working_data_flt, measures_flt = hp.process(signal_filtered, 500.0)
bpm_flt = hp.plotter(working_data, measures, show=True)
plt.show(block=False)
# hp.plotter(working_data, measures)
bpm_flt = hp.plotter(working_data_flt, measures_flt, show=True)
plt.show(block=False)

# Synthetic two-tone reference signal (1.2 Hz + 1.6 Hz) for sanity checking.
y1 = 0.01 * np.sin(1.2 * x * 2 * np.pi)
y2 = -0.005 * np.sin(1.6 * x * 2 * np.pi)
Y = y1+y2
working_data_t, measures_t = hp.process(Y, 500.0)
bpm_flt = hp.plotter(working_data_t, measures_t, show=True)
plt.show(block=False)

# Side-by-side comparison: synthetic signal vs. filtered recording.
fig, (ax1, ax2) = plt.subplots(2)
ax1.plot(x, y1+y2)
ax2.plot(x, signal_filtered)
plt.show()
{
"api_name": "librosa.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "scipy.fft.fft",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_numbe... |
3975828154 | import numpy as np
import torch
from torch._C import dtype
import torch.nn as nn
import random
from filters import BP_filter
import matplotlib.pyplot as plt
class Random_shift(nn.Module):
    """Circularly shift both tracks by the same random sample count.

    Rolling noisy and clean by an identical offset keeps them aligned.
    """

    def __init__(self, shift_max):
        super().__init__()
        # Largest allowed shift, inclusive.
        self.shift_max = shift_max

    def forward(self, noisy_track, clean_track):
        offset = random.randint(0, self.shift_max)
        shifted_noisy = torch.roll(noisy_track, offset, dims=-1)
        shifted_clean = torch.roll(clean_track, offset, dims=-1)
        return shifted_noisy, shifted_clean
class Remix(nn.Module):
    """Shuffle the noise component of the track in time.

    The noise (noisy - clean) of the final [-1][-1] slice is permuted and
    added back onto the clean track. Note: mutates noisy_track in place.
    """

    def forward(self, noisy_track, clean_track):
        noise = noisy_track - clean_track
        order = torch.randperm(noise.size()[-1])
        noisy_track[-1][-1][:] = clean_track[-1][-1][:] + noise[-1][-1][order]
        return noisy_track, clean_track
class Band_mask(nn.Module):
    """Band-pass both tracks through the same BP_filter instance."""

    def __init__(self, sample_rate, min_freq, max_freq):
        super().__init__()
        # Sample rate kept for reference; the filter takes only the band edges.
        self.sample_rate = sample_rate
        self.filter = BP_filter(min_freq, max_freq)

    def forward(self, noisy_track, clean_track):
        filtered_noisy = self.filter.forward(noisy_track)
        filtered_clean = self.filter.forward(clean_track)
        return filtered_noisy, filtered_clean
def test():
    """Visual smoke test: plot the effect of each augmentation on random data.

    Note: Remix mutates its first argument in place, so the 'x' and
    'remixed x' plots show the same (mutated) tensor, matching the original
    call order.
    """
    sig_x = torch.randn(1, 1, 1000)
    sig_y = torch.randn(1, 1, 1000)
    t = np.linspace(0, 1000, num=1000, dtype=np.float32)

    shift = Random_shift(10)
    remix = Remix()
    band_mask = Band_mask(48000, 0.1, 0.15)

    shifted_x, _shifted_y = shift.forward(sig_x, sig_y)
    remixed_x, _remixed_y = remix.forward(sig_x, sig_y)
    masked_x, _masked_y = band_mask.forward(sig_x, sig_y)

    for title, data in (('x', sig_x), ('shifted x', shifted_x),
                        ('remixed x', remixed_x), ('masked x', masked_x)):
        plt.figure()
        plt.title(title)
        plt.plot(t, data[-1][-1])
    plt.show()
if __name__ == '__main__':
    # Run the visual smoke test when executed directly.
    test()
| GianMarcoZampa/Progetto-DACLS | augmentation.py | augmentation.py | py | 2,144 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.roll",
"line_num... |
26355670681 | import random
import json
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, random_split, ConcatDataset
import pandas as pd
import os
import torchaudio
from dataset import snoring_preprocess
from dataset import dataset_utils
# TODO: some inspections for arguments
# TODO: ABC? (to a meta dataset)
# TODO: split rate for testing --> train and testing dataset building need to be separated
# TODO: Considering a reusing dataset builder (Can we minimize the building effort?)
class MetaDataset(Dataset):
    """Generic map-style dataset driven by a list of data references.

    Each reference is either a plain string (handed directly to ``loader``)
    or a dict with an 'input' key and, optionally, a 'target' key. The
    item-building function is chosen once at construction time from the shape
    of the first reference.

    Fix: the item-building methods hard-coded the 'input'/'target' string
    literals even though the class defines self.input_key/self.target_key;
    they now use the attributes consistently (defaults unchanged, so behavior
    is backward compatible).
    """

    def __init__(self, loader, data_refs, dataset_name=None, transform=None, target_loader=None):
        # loader: callable mapping an input reference to loaded data
        # data_refs: non-empty sequence of str or dict references
        # transform: optional post-load transform; called as transform(input)
        #            for input-only flows and transform(input, target) for pairs
        # target_loader: optional callable mapping a target reference to a target
        self.loader = loader
        self.data_refs = data_refs
        self.dataset_name = dataset_name
        self.transform = transform
        self.target_loader = target_loader
        self.input_key = 'input'
        self.target_key = 'target'
        self.log_dataset()
        self.dataflow_func = self.get_dataflow_format()

    def __len__(self):
        return len(self.data_refs)

    def __getitem__(self, idx):
        return self.dataflow_func(idx)

    def get_dataflow_format(self):
        """Pick the item-building function based on the first reference's shape."""
        first_sample = self.data_refs[0]
        if isinstance(first_sample, dict):
            assert self.input_key in first_sample, 'No input data reference'
            if self.target_key in first_sample:
                return self.get_datapair_from_dict_seq
            else:
                return self.get_input_from_dict_seq
        elif isinstance(first_sample, str):
            return self.get_input_from_str_seq
        else:
            raise ValueError('Unknown reference format.')

    def get_input_from_str_seq(self, idx):
        """Load (and optionally transform) the input for a string reference."""
        input_data = self.loader(self.data_refs[idx])
        if self.transform is not None:
            input_data = self.transform(input_data)
        return input_data

    def get_input_from_dict_seq(self, idx):
        """Load (and optionally transform) the input for a target-less dict reference."""
        input_data = self.loader(self.data_refs[idx][self.input_key])
        if self.transform is not None:
            input_data = self.transform(input_data)
        return input_data

    def get_datapair_from_dict_seq(self, idx):
        """Load an (input, target) pair for a dict reference carrying a target."""
        input_data = self.loader(self.data_refs[idx][self.input_key])
        if self.target_loader is not None:
            target = self.target_loader(self.data_refs[idx][self.target_key])
        else:
            target = self.data_refs[idx][self.target_key]
        if self.transform is not None:
            # For pairs the transform receives and returns both items.
            input_data, target = self.transform(input_data, target)
        return {self.input_key: input_data, self.target_key: target}

    def log_dataset(self):
        # Hook for subclasses to log dataset statistics; intentionally a no-op.
        pass
def get_ref_mapping():
    # Placeholder: intended to map data references; not implemented yet.
    pass
def wav_loader(filename, sr=None, channels=1):
    """Load a wav file via pydub and return (waveform, sample_rate).

    :param filename: path to the wav file
    :param sr: target sample rate passed to the pydub helper (None = native)
    :param channels: number of channels to load
    :return: (float32 numpy array of samples, actual frame rate)
    """
    sound = dataset_utils.get_pydub_sound(filename, 'wav', sr, channels)
    samples = np.array(sound.get_array_of_samples(), np.float32)
    return samples, sound.frame_rate
# def build_single_snoring_dataset(
# data, data_refs, loader, data_transform=None, dataset_name=None):
# data_config = {
# 'data': data,
# 'data_refs': data_refs,
# 'loader': loader,
# 'dataset_name': dataset_name,
# 'transform': data_transform,
# 'target_loader': None,
# }
# data_config['data_refs'] = train_refs
# train_dataset = SnoringDataset(**data_config)
# data_config['data_refs'] = valid_refs
# data_config['transform'] = None
# valid_dataset = SnoringDataset(**data_config)
# return train_dataset, valid_dataset
# def get_snoring_refs(label_path, split_json):
# local_ref = json.load(split_json)
# label_table = pd.read_csv(label_path)
# return path_refs
# XXX: change to JSON format
def get_snoring_refs(data_ref_path, data_root):
    """Read the reference CSV and expand each row to a full wav path under data_root."""
    ref_table = pd.read_csv(data_ref_path)
    return snoring_preprocess.get_path_refs_fast(data_root, ref_table, suffix='wav')
# XXX: Put in here currently
def snoring_transform(waveform):
    # Intended pipeline: waveform augmentation -> spectrogram -> spectrogram augmentation.
    # NOTE(review): wavform_transform, wavform_to_spec and spec_transform are not
    # defined anywhere in this module or its imports — calling this raises
    # NameError. Presumably these helpers are still to be written; confirm.
    waveform = wavform_transform(waveform)
    spec = wavform_to_spec(waveform)
    spec = spec_transform(spec)
    return spec
# XXX: return sr?
def torchaudio_loader(path):
    """Load an audio file as a normalized waveform tensor; the sample rate is discarded."""
    waveform, _sr = torchaudio.load(path, normalize=True)
    return waveform
def build(data_csv, train_json, valid_json, dataset_name):
    """Create (train, valid) MetaDatasets sharing the torchaudio loader and snoring transform."""
    loader = torchaudio_loader
    datasets = []
    for split_json in (train_json, valid_json):
        refs = get_snoring_refs(data_csv, split_json)
        datasets.append(MetaDataset(
            loader, refs, dataset_name, snoring_transform, target_loader=None))
    train_dataset, valid_dataset = datasets
    return train_dataset, valid_dataset
def build_dataloader(total_data_config, train_batch_size):
    """Build concatenated train/valid DataLoaders from a list of dataset configs.

    :param total_data_config: list of kwargs dicts accepted by build()
    :param train_batch_size: batch size for the (shuffled) training loader
    :return: (train_loader, valid_loader); validation uses batch size 1, unshuffled
    """
    train_sets = []
    valid_sets = []
    for config in total_data_config:
        train_ds, valid_ds = build(**config)
        train_sets.append(train_ds)
        valid_sets.append(valid_ds)
    train_loader = DataLoader(ConcatDataset(train_sets), train_batch_size, shuffle=True)
    valid_loader = DataLoader(ConcatDataset(valid_sets), 1, shuffle=False)
    return train_loader, valid_loader
def main():
    # Ad-hoc local smoke test using hard-coded Windows paths; not portable.
    data_root = r'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_subset\pp\Samsung_Note10Plus_night\wave_split'
    data_ref_path = r'C:\Users\test\Desktop\Leon\Datasets\ASUS_snoring_subset\pp\Samsung_Note10Plus_night\data.csv'
    data_refs = pd.read_csv(data_ref_path)
    # Expand references to full paths; result is currently unused (WIP).
    path_refs = snoring_preprocess.get_path_refs_fast(data_root, data_refs, suffix='wav')
    pass
    # build_snoring_dataset(data_roots, data_ref_paths, train_batch_size, data_transform=None)


if __name__ == '__main__':
    main()
{
"api_name": "torch.utils.data.Dataset",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "dataset.dataset_utils.get_pydub_sound",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "dataset.dataset_utils",
"line_number": 82,
"usage_type": "name"
},
{
... |
# Project) Build an arcade "Pang" game
# [Game rules]
# 1. The character sits at the bottom of the screen and can only move left/right
# 2. Pressing space fires a weapon upward
# 3. One large ball appears and bounces around
# 4. A ball hit by the weapon splits into 2 smaller balls; the smallest balls disappear
# 5. Destroying every ball ends the game => success
# 6. The character touching a ball ends the game => failure
# 7. Exceeding the 99-second time limit ends the game => failure
# 8. FPS is fixed at 30 => adjust speed values if needed

# [Game images]
# 1. Background : 640 * 480 (width * height) - background.png
# 2. Stage : 640 * 50 - stage.png
# 3. Character : 60 * 33 - character.png
# 4. Weapon : 20 * 430 - weapon.png
# 5. Balls : 160 * 160, 80 * 80, 40 * 40, 20 * 20 - balloon1.png ~ balloon4.png
######################################################
import os
import pygame
######################################################
# Basic initialization (always required)
pygame.init()

# Screen size setup
screen_width = 640  # width
screen_height = 480  # height
screen = pygame.display.set_mode((screen_width, screen_height))

# Window title
pygame.display.set_caption("*** Pang pang ***")

# FPS clock (frames per second)
clock = pygame.time.Clock()
######################################################
# 1. Game-specific initialization (background, images, coordinates, speed, fonts, ...)
current_path = os.path.dirname(__file__)  # directory containing this file
image_path = os.path.join(current_path, "images")  # image assets live in ./images next to this file

# Load the background
background = pygame.image.load(os.path.join(image_path, "background.png"))  # load background.png from image_path

# Load the stage
stage = pygame.image.load(os.path.join(image_path, "stage.png"))
stage_size = stage.get_rect().size
stage_height = stage_size[1]  # stage height, used to place the character on top of it

# Load the character
character = pygame.image.load(os.path.join(image_path, "character.png"))
character_size = character.get_rect().size
character_width = character_size[0]
character_height = character_size[1]
character_x_pos = (screen_width / 2) - (character_width / 2)  # horizontally centered
character_y_pos = screen_height - character_height - stage_height  # standing on top of the stage

running = True
while running:
    dt = clock.tick(30)  # cap the loop at 30 FPS

    # 2. Event handling (keyboard, mouse, ...)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False

    # 3. Game character position updates (not implemented yet)

    # 4. Collision handling (not implemented yet)

    # 5. Draw to the screen (back-to-front: background, stage, character)
    screen.blit(background, (0, 0))
    screen.blit(stage, (0, screen_height - stage_height))
    screen.blit(character, (character_x_pos, character_y_pos))

    pygame.display.update()

pygame.quit()
{
"api_name": "pygame.init",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pygame.displa... |
37914829335 | import ipywidgets as widgets
def make_model_list_dropdown() -> widgets.Widget:
    """Build the dropdown used to select the active model."""
    return widgets.Dropdown(
        options=['Model 1', "Click to add new model..."],
        value='Model 1',
        description='Select Model',
        style={'description_width': '100px'},
        layout={'width': '400px'},
    )
def make_model_list_box() -> widgets.Widget:
    """Wrap the model dropdown in a horizontal box."""
    return widgets.HBox(children=[make_model_list_dropdown()], layout={})
def make_clear_button() -> widgets.Widget:
    """Build the button that clears all traces from the plots."""
    return widgets.Button(
        description='Clear All Plots',
        icon='chart-line',
        style={'button_color': '#9ecae1'},
        layout={'width': '400px'},
        tooltip="Removes all traces from the small-angle scattering and dark field plots on the right.",
    )
def make_export_button() -> widgets.Widget:
    """Build the button that exports plotted data to csv files."""
    return widgets.Button(
        description='Export Plot Data',
        icon='save',
        style={'button_color': '#4292c6'},
        layout={'width': '400px'},
        tooltip="Exports SAS and dark field data from the plots to csv files in the working directory.",
    )
class ModelHeaderBox:
    """Assemble the model selector row and the clear/export button row."""

    def __init__(self):
        selector_row = make_model_list_box()
        clear_btn = make_clear_button()
        # clear_button.on_click(clear_plots_func)
        export_btn = make_export_button()
        # export_button.on_click(export_data_func)
        button_row = widgets.HBox(children=[clear_btn, export_btn])
        self.model_header_box = widgets.VBox(children=[selector_row, button_row])
| usnistgov/correlogram_tools | correlogram_tools/plotting_widget/model_header_box.py | model_header_box.py | py | 1,820 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "ipywidgets.Dropdown",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "ipywidgets.Widget",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "ipywidgets.HBox",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "ipywidgets.W... |
8059135342 | import pyttsx3
import speech_recognition as sr
import datetime
import wikipedia
import webbrowser
print("READ THIS")
print("WHAT YOUR JARVIS CAN DO")
print("Your Jarvis can search in chrome\n can summarize wikipedia\n can open google and youtube\n can show you IPL score\n can open National Geographic\n can recommend the best educational channel \n can open the famous coding YT Channel Code With Harry\n can show you recommended Hindi news channel\n Can plat latest music playlist having more than 1000 songs from T Series\n can show you recommended marathi news channel\n can open Adobe Reader 9\n can open amazon\n can also open recommended coding website of stack overflow\n can also tell present time\n can also open SonyLiv website")
print("Jarvis can also answer your questions as\n How are you?\n Who are you?\n How do you do?\n What are you doing?\n What is your name?\n Are you a robot? ")
print("Your Jarvis can also reply on your comments like\n Really!\n Hello\n I am fine\n")
print("Your Jarvis can quit by saying\n Quit \n Bye \n Goodbye \n Exit \n \n \n")
# Initialise the Windows SAPI5 text-to-speech engine and pick the first voice.
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
# Ask for the user's name once at startup; used in personalised replies.
print("Enter your Name:")
username = str(input())
def speak(audio):
    """Speak *audio* aloud through the pyttsx3 engine (blocks until done)."""
    engine.say(audio)
    engine.runAndWait()
def wishMe():
    """Greet the user according to the current local hour, then offer help."""
    hour = datetime.datetime.now().hour
    if hour < 12:
        speak('Good Morning Sir')
    elif hour < 18:
        speak('Good Afternoon sir')
    else:
        speak('Good Evening sir')
    speak('I am Jarvis. How can I help you?')
def takeCommand():
    """Listen on the microphone and return the recognised text.

    Returns the literal string "None" when recognition fails, which the
    main loop treats as a query matching nothing.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print('Listening...')
        # Seconds of silence that mark the end of a phrase.
        r.pause_threshold = 1
        audio = r.listen(source)
    try:
        print('Recognizing...')
        query = r.recognize_google(audio, language='en')
        print(f"User said: {query}\n")
    except Exception as e:
        # Network errors and unrecognisable audio both land here.
        print("say that again pls sir")
        return "None"
    return query
if __name__ == "__main__":
    # Main command loop: greet once, then match keywords in each spoken query.
    # Matching is by substring, so earlier branches shadow later ones when
    # their keyword is contained in a longer phrase.
    wishMe()
    while True:
        query = takeCommand().lower()
        #executing task based on query
        if'wikipedia' in query:
            speak('Searching wikipedia')
            query = query.replace("wikipedia", "")
            results = wikipedia.summary(query, sentences=2)
            speak("According to Wikipedia")
            print(results)
            speak(results)
        elif'youtube' in query:
            webbrowser.open("www.youtube.com")
            speak("This is the official website of YouTube")
        elif'google' in query:
            webbrowser.open("www.google.com")
            speak("This is the official website of Google.")
        elif'how are you' in query:
            speak("I am fine sir")
            print("I am fine sir")
        elif'what are you doing' in query:
            speak("I am spending time with you")
            print("I am spending time with you")
        elif'your name' in query:
            speak("My name is Jarvis sir")
            print("My name is Jarvis, sir")
        elif'who are you' in query:
            speak("I am Jarvis sir")
            print("I am Jarvis sir")
        elif'well done' in query:
            speak("Thank you its my pleasure")
            print("Thank you its my pleasure")
        elif'thank you' in query:
            speak("You are welcome sir")
            print("You are welcome sir")
        elif'sports' in query:
            webbrowser.open("https://news.google.com/topics/CAAqJggKIiBDQkFTRWdvSUwyMHZNRFp1ZEdvU0FtVnVHZ0pWVXlnQVAB?hl=en-IN&gl=IN&ceid=IN:en")
            speak("These are sports news from google news.")
        elif'exit' in query:
            speak(f"Goodbye {username}, Take care")
            print(f"Goodbye {username}, Take care")
            exit()
        # NOTE(review): "goodbye" contains "bye", so the 'goodbye' branch
        # below is unreachable -- this branch handles both.
        elif'bye' in query:
            speak("Bye sir see you next time")
            exit()
        elif'goodbye' in query:
            speak("Goodbye sir, see you next time")
            exit()
        elif'music'in query:
            webbrowser.open("https://www.youtube.com/aashiqui2/videos")
            speak("These are the music from T-Series YouTube Channel")
        elif'really' in query:
            speak("yeah sir")
        elif'are you robot' in query:
            speak("yes sir, I am Jarvis, a desktop assistant.")
            print("yes sir, I am Jarvis, a desktop assistant.")
        elif'scientific hindi' in query:
            webbrowser.open("https://www.youtube.com/c/FactTechz/videos")
            speak("This is the most famous science fiction YouTube channel in hindi named Facttechz")
        # NOTE(review): this branch opens the same channel but never speaks;
        # probably a missing speak() call.
        elif'hindi science' in query:
            webbrowser.open("https://www.youtube.com/c/FactTechz/videos")
        elif'hindi scientific' in query:
            webbrowser.open("https://www.youtube.com/c/FactTechz/videos")
            speak("This is the most famous science fiction YouTube channel in hindi named Facttechz")
        elif'play latest music' in query:
            webbrowser.open("https://www.youtube.com/watch?v=b8--JS9lRnI&list=PL095CA373D64B70C0")
            speak("This is the playlist of latest song by T series")
        elif'hello' in query:
            speak(f"hello {username}")
            print(f"hello {username}")
        elif'quit' in query:
            speak("Bye")
            print("Bye")
            exit()
        elif'scientific channel' in query:
            webbrowser.open("https://www.youtube.com/c/NatGeo/videos")
            speak("This is the best scientific YouTube channel names National Geographic")
        elif'codewithharry' in query:
            webbrowser.open("https://www.youtube.com/c/CodeWithHarry/videos")
            speak("This is the best coding YouTube channel named Code With Harry")
        elif'best educational channel' in query:
            webbrowser.open("https://www.youtube.com/c/DearSir/videos")
            speak("This is the most best educational youtube channel, who teaches English and Mathematics for seondary, high secondary and students who are preparing themselves for competitive exams and also teaches us English speaking and short tricks for Maths. This channel is named as Dear Sir")
        # NOTE(review): matches any query containing "time" (e.g. "sometimes").
        elif'time' in query:
            strTime = datetime.datetime.now().strftime("%H:%M:%S")
            speak(f"Sir, the time is{strTime}")
        elif'stack overflow' in query:
            webbrowser.open("www.stackoverflow.com")
            speak("This is one of the recommended website named stack overflow which teaches you coding")
        elif'how do you do' in query:
            speak("I am fine sir, what about you")
            print("I am fine sir, what about you?")
        elif'am fine' in query:
            speak("Nice to hear that from you sir")
            print("Nice to hear that from you sir")
        elif'hindi news' in query:
            webbrowser.open("https://www.youtube.com/watch?v=SmQqAnKG6zs")
            speak("This is the live news from hindi")
        elif'live news' in query:
            webbrowser.open("https://www.youtube.com/watch?v=9Auq9mYxFEE")
            speak("This is the live news from Sky News in English")
        elif'marathi news' in query:
            webbrowser.open("https://www.youtube.com/watch?v=itJLgnqZ3U8")
            speak("This is the live news from ABP maza in marathi")
        elif'amazon' in query:
            webbrowser.open("https://www.amazon.in/?ext_vrnc=hi&tag=googhydrabk-21&ascsubtag=_k_EAIaIQobChMIipy3m8iB7QIVBpZLBR3Lqwg6EAAYASAAEgJNlfD_BwE_k_&ext_vrnc=hi&gclid=EAIaIQobChMIipy3m8iB7QIVBpZLBR3Lqwg6EAAYASAAEgJNlfD_BwE")
            speak("This the shopping website named amazon")
        elif'shopping' in query:
            webbrowser.open("https://www.amazon.in/?ext_vrnc=hi&tag=googhydrabk-21&ascsubtag=_k_EAIaIQobChMIipy3m8iB7QIVBpZLBR3Lqwg6EAAYASAAEgJNlfD_BwE_k_&ext_vrnc=hi&gclid=EAIaIQobChMIipy3m8iB7QIVBpZLBR3Lqwg6EAAYASAAEgJNlfD_BwE")
            speak("This the shopping website named amazon")
        elif'sonyliv' in query:
            webbrowser.open("https://www.sonyliv.com/")
            speak("This is the sony leave website. You can use it for watching TV shows")
        elif'best entertaining' in query:
            webbrowser.open("https://www.sonyliv.com/")
            speak("This is the sony leave website. You can use it for watching TV shows")
        elif'entertainment' in query:
            webbrowser.open("https://www.sonyliv.com/")
            speak("This is the sonyliv website. You can use it for watching TV shows")
        elif'national geographic'in query:
            webbrowser.open("https://www.youtube.com/c/NatGeo/videos")
            speak("This is the best scientific YouTube channel names National Geographic")
        # NOTE(review): duplicate of the 'hindi news' branch above -- unreachable.
        elif'hindi news' in query:
            webbrowser.open("https://www.youtube.com/watch?v=SmQqAnKG6zs")
            speak("This is the live news from hindi")
        elif'gmail' in query:
            webbrowser.open("https://mail.google.com/mail/u/0/#inbox")
            speak("Starting Google Mail")
        elif'meet' in query:
            webbrowser.open("https://meet.google.com/")
            speak("Starting Google Meet")
| sarthak-dhonde/jarvis | Jarvis.py | Jarvis.py | py | 9,194 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pyttsx3.init",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "speech_reco... |
41413125741 | #!/usr/bin/env python3
import os
import datetime
# Complete the time_delta function below.
def time_delta(t1, t2):
    """Return the absolute difference, in whole seconds, between two
    timestamps formatted like ``Sun 10 May 2015 13:54:36 -0700``.

    The timezone-aware datetimes are subtracted directly; the original
    round-tripped both through truncated Unix timestamps, which was
    unnecessarily fragile.  The value is also printed (legacy behaviour
    callers may rely on).
    """
    pattern = '%a %d %b %Y %H:%M:%S %z'
    d1 = datetime.datetime.strptime(t1, pattern)
    d2 = datetime.datetime.strptime(t2, pattern)
    seconds = int(abs((d1 - d2).total_seconds()))
    print(seconds)
    return seconds
if __name__ == '__main__':
    # HackerRank harness: read t timestamp pairs and write each delta
    # to the grader-supplied output file.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    t = int(input())
    for t_itr in range(t):
        t1 = input()
        t2 = input()
        delta = time_delta(t1, t2)
        fptr.write(str(delta) + '\n')
    fptr.close()
| iliankostadinov/hackerrank-python | time_delta.py | time_delta.py | py | 620 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 11,
"usage_type": "call"
},
{
"api_... |
25082622727 | from django.urls import path
from . import views
# URL routes: page views, insert endpoints, event participation,
# user management and the authentication flow.
urlpatterns = [
    path('', views.index, name='index'),
    path('event/', views.event, name='event'),
    path('place/', views.place, name='place'),
    path('imply/', views.imply, name='imply'),
    path('insert_event/', views.insert_event, name='insert_event'),
    path('insert_place/', views.insert_place, name='insert_place'),
    path('insert_imply/', views.insert_imply, name='insert_imply'),
    path('insert_picture/', views.insert_picture, name='insert_picture'),
    path('participate_event/', views.participate_event, name='participate_event'),
    path("insert_user/", views.insert_user, name="insert_user"),
    path("insert_user_direct/", views.insert_user_direct, name="insert_user_direct"),
    path("id_duplicate_check/", views.id_duplicate_check, name="id_duplicate_check"),
    path("login/", views.login, name="login"),
    path("auth/", views.auth, name="auth"),
    path("ranking/", views.ranking, name="ranking"),
    path("participating/", views.participating, name="participating"),
    path('select_auth_method/', views.select_auth_method, name='select_auth_method'),
    path('finish_auth_check/', views.finish_auth_check, name='finish_auth_check'),
    path('place_auth_info/', views.place_auth_info, name='place_auth_info'),
]
| leehj8896/capstone-server_ec2 | print_db/urls.py | urls.py | py | 1,393 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
5493453692 | import re
import sys
from io import StringIO
from colorama import Fore, Style
from diff_match_patch import diff_match_patch
def highlight_differences(text1, text2):
    """Return *text2* diffed against *text1*, with deletions rendered in red
    and insertions in green (ANSI colours via colorama)."""
    dmp = diff_match_patch()
    diffs = dmp.diff_main(text1, text2)
    dmp.diff_cleanupSemantic(diffs)
    pieces = []
    for op, chunk in diffs:
        if op == -1:
            pieces.append(Fore.RED + chunk + Style.RESET_ALL)
        elif op == 1:
            pieces.append(Fore.GREEN + chunk + Style.RESET_ALL)
        else:
            pieces.append(chunk)
    return "".join(pieces)
def capture_output(func, *args, **kwargs) -> str:
    """Run ``func(*args, **kwargs)`` and return everything it printed to stdout.

    stdout is restored in a ``finally`` block, so the redirect is undone even
    when *func* raises (the original version leaked the redirect on error).
    """
    buffer = StringIO()
    original_stdout = sys.stdout
    sys.stdout = buffer
    try:
        func(*args, **kwargs)
    finally:
        sys.stdout = original_stdout
    return buffer.getvalue()
def compare_output(output_value, expected_output_file) -> bool:
    """Compare captured output against the expected-output file.

    icecream debug lines (``ic| ...``) are stripped first; on mismatch a
    coloured diff is printed.  Returns True when the outputs match.
    """
    with open(expected_output_file, 'r') as f:
        expected_output = f.read().strip()
    actual = output_value.strip()
    actual = re.sub(r"ic\|.*\n", "", actual)
    if actual != expected_output:
        print("Output does not match the expected output.")
        print()
        print(highlight_differences(expected_output, actual))
        print()
    return actual == expected_output
| algFame/geektrust | rider-sharing/src/utils.py | utils.py | py | 1,384 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "diff_match_patch.diff_match_patch",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 17,
"usage_type": "name"
},
{
"api_name":... |
43535013974 | import argparse
import sys
import os, os.path
import redis
import argparse
import logging, logging.config
from hashlib import md5
# Command-line interface: data directory to load plus the Redis host/port.
argparser = argparse.ArgumentParser()
argparser.add_argument("--data", type=str, required=True)
argparser.add_argument("--db-host", type=str, required=True)
argparser.add_argument("--db-port", type=int, required=True)
# NOTE(review): parsing at import time makes the module unusable as a
# library -- consider moving this under the __main__ guard.
args = argparser.parse_args()
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def main():
    """Connect to Redis and load every top-level data segment from --data."""
    db_conn = redis.Redis(host=args.db_host, port=args.db_port, db=0)
    set_root_segments(args.data, db_conn)
    for entry in os.listdir(args.data):
        process_data_segment(args.data, entry, db_conn)
def set_root_segments(data_path, db_conn):
    """Register every top-level directory of *data_path* in the "/root" set."""
    entries = os.listdir(data_path)
    logging.debug("Setting root dirs: {}".format(entries))
    db_conn.sadd("/root", *entries)
def process_data_segment(data_root, data_path, db_conn):
    """Recursively load one data segment into Redis.

    A directory whose sole entry is ``_data`` is a leaf: its file contents
    are stored under ``<path>:data``.  Any other directory stores its child
    names under ``<path>:next`` and recurses into each of them.  Every path
    is also indexed by its MD5 hash for reverse lookup.
    """
    logging.debug("Processing data entry {}".format(data_path))
    segment_dir = os.path.join(data_root, data_path)
    data_entries = os.listdir(segment_dir)

    # Reverse index: md5(path) -> path.
    key_hash = md5(data_path.encode('utf-8')).hexdigest()
    db_conn.set(key_hash, data_path)

    if data_entries == ["_data"]:
        logging.debug("Saving data for entry {}".format(data_path))
        # Context manager closes the handle promptly; the original left the
        # file open until garbage collection.
        with open(os.path.join(segment_dir, "_data")) as data_file:
            db_conn.set("{}:data".format(data_path), data_file.read())
    else:
        logging.debug("Setting data child entries {}".format(data_entries))
        db_conn.sadd("{}:next".format(data_path), *data_entries)
        # Reuse the listing gathered above instead of hitting the FS twice.
        for entry in data_entries:
            process_data_segment(data_root, os.path.join(data_path, entry),
                                 db_conn)
if __name__ == "__main__":
main()
| HappyNationHack/team_Help_Desk | scripts/load_data.py | load_data.py | py | 1,742 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "logging.DE... |
3540804132 | """============================================================================
Có 60% người mua xe thể thao là nam giới.
1. Chọn loại phân phối. Tạo ra 10 mẫu (ngẫu nhiên) theo mô tả trên
với số lần lặp lại các thí nghiệm là 1000
2. Vẽ histogram quan sát. Nhận xét.
3. Trong 10 chủ xe thể thao được chọn ngẫu nhiên, tính xs có 7 nam giới.
============================================================================"""
import math
import seaborn as sns
from scipy.stats import binom
#------------------------------------------------------------------------------
# Compute the combination "n choose k".
# With Python 3.8+, math.comb(n, k) can be used instead.
#------------------------------------------------------------------------------
def combination(n, k):
    """Return C(n, k) = n! / (k! * (n-k)!), the number of ways to choose
    k items from n.

    Uses integer (floor) division so the result is exact even for large n;
    the original true division produced a float that can lose precision.
    """
    numerator = math.factorial(n)
    denominator = math.factorial(k) * math.factorial(n - k)
    return numerator // denominator
# 1. Choose the distribution; draw 10 samples with 1000 repetitions.
# X ~ Binomial(n, p)
# n: number of trials per experiment
# p: probability of success
# size: number of repetitions (trials) of the experiment.
n = 10
p = 0.6
size = 1000
data_binom = binom.rvs(n=n, p=p, size = size)
# 2. Plot a histogram of the observations.
ax = sns.distplot(data_binom, kde = False, color = 'blue',
                  hist_kws = {'linewidth': 15, 'alpha': 1})
ax.set(xlabel = 'Binomial Distribution', ylabel = 'Frequency')
# 3. P(exactly 7 males among 10 randomly chosen sports-car owners).
# f(k) = P(X = k) = C(n, k) * p^k * (1-p)^(n-k)
k = 7
P_X_k = combination(n, k) * math.pow(p, k) * math.pow(1 - p, n - k)
print('P(X = 7) =', P_X_k)
| lualua0909/Math-4-ML-lds3 | B6. Probability/Ex2 - Cau 2.py | Ex2 - Cau 2.py | py | 1,885 | python | vi | code | 10 | github-code | 1 | [
{
"api_name": "math.factorial",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "math.factorial",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scipy.stats.binom.rvs",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "scipy.stats.binom... |
11295990652 | """
Constraints:
- the position of the container has to be in range of device capacity
based on device version
- on creation it checks there is a proper amount of chambers associated
with this object
- there cannot be any other container at the same place in the same device
"""
from django.db import models
import uuid as UUID
from django.core.exceptions import ValidationError
from django.utils.translation import gettext as _
from django.db.models import Q
from API.managers import ContainerManager
class Container(models.Model):
    """A container mounted in a slot of a device.

    Validation (see module docstring): the position must fit within the
    device's capacity, and no two containers may occupy the same slot of
    the same device.
    """
    # universal identifier
    uuid = models.UUIDField(
        primary_key=True,
        default=UUID.uuid4,
        editable=False,
        unique=True
    )
    # version that specifies capacity
    version = models.ForeignKey(
        "ContainerVersion",
        on_delete=models.PROTECT,
        related_name="existing_containers"
    )
    # reference to device this container belongs to
    device = models.ForeignKey(
        "Device",
        on_delete=models.CASCADE,
        related_name="containers"
    )
    # position in the container
    position = models.PositiveSmallIntegerField()
    # time this container was refilled at
    last_refill = models.DateTimeField(null=True, blank=True)
    # custom manager
    objects = ContainerManager()
    @property
    def capacity(self):
        """Number of chambers this container can hold (from its version)."""
        return self.version.capacity
    @property
    def fill_status(self):
        """
        Returns number of percents this container is filled in calculated over
        every chamber inside.
        """
        # NOTE(review): self.chambers is the related manager scoped to this
        # container already; the extra filter(container=self) looks redundant.
        chambers = self.chambers.filter(container=self)
        totalCapacity = self.capacity
        fullChambers = 0
        for chamb in chambers:
            if chamb.is_full:
                fullChambers += 1
        # Avoid division by zero for a zero-capacity version.
        if totalCapacity == 0:
            return 0
        return round(100 * fullChambers / totalCapacity)
    def clean(self):
        """Model-level validation; raises ValidationError on constraint breach."""
        # validate if the position is in the correct range
        if self.position < 0 or self.position >= self.device.capacity:
            raise ValidationError(
                _("Position value outside of container's capacity"),
                code="invalid_value"
            )
        # make sure there are no two containers at the same place
        # (excluding this instance itself, so updates pass validation)
        if Container.objects.filter(
            ~Q(uuid=self.uuid) &
            Q(device=self.device) &
            Q(position=self.position)
        ).exists():
            raise ValidationError(
                _("Two containers at the same place."),
                code="integrity_error"
            )
    def __str__(self):
        return f"In {self.device}, at pos. {self.position}"
| AmbientELab-Group/Medbox-server | app/API/models/container.py | container.py | py | 2,660 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.db.models.UUIDField",
"line_number": 19,
"usage_type": "call"
},
{
"api_name"... |
43565545552 | from django.conf.urls import patterns, include, url, static
from django.conf import settings
from django.views.generic import TemplateView
from django.contrib import admin
from django.views.generic import RedirectView
from accounts.views import UserProfileUpdateView
from django.views.defaults import permission_denied
admin.autodiscover()
urlpatterns = []
# Route every blacklisted plugin URL (prefix + pattern) to the 403 view,
# so blacklist entries take precedence over the plugin's own routes below.
colab_plugins = settings.COLAB_APPS
for plugin in colab_plugins:
    colab_plugin = colab_plugins.get(plugin)
    plugin_blacklist = colab_plugin.get('blacklist')
    if plugin_blacklist:
        for plugin_url in plugin_blacklist:
            final_url = colab_plugin.get('urls').get('prefix')
            final_url += plugin_url
            urlpatterns += patterns(
                '', url(final_url, permission_denied))
# Core-level blacklist entries get the same treatment.
if hasattr(settings, 'BLACKLIST'):
    core_blacklist = settings.BLACKLIST
    for core_url in core_blacklist:
        urlpatterns += patterns('', url(core_url, permission_denied))
# Regular application routes come last.
urlpatterns += patterns(
    '',
    url(r'^$', RedirectView.as_view(url=settings.COLAB_HOME_URL), name='home'),
    url(r'^robots.txt$', 'colab.home.views.robots', name='robots'),
    url(r'^dashboard$', 'colab.home.views.dashboard', name='dashboard'),
    url(r'^search/', include('colab.search.urls')),
    url(r'^rss/', include('colab.rss.urls')),
    url(r'^account/', include('colab.accounts.urls')),
    url(r'^myaccount/(?P<route>.*)$',
        'colab.accounts.views.myaccount_redirect', name='myaccount'),
    url(r'^colab/admin/', include(admin.site.urls)),
    url(r'', include('colab.plugins.urls')),
)
| colab/colab | colab/urls.py | urls.py | py | 1,586 | python | en | code | 23 | github-code | 1 | [
{
"api_name": "django.contrib.admin.autodiscover",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.COLAB_APPS",
"line_number": 13,
"usage_type": "attribute"
},
... |
71311124194 | import requests
from xml.etree import ElementTree
class CTAInterface():
    """Thin client for the CTA Train Tracker arrivals API."""
    def __init__(self, api_key):
        # API key issued by the CTA developer portal.
        self.api_key = api_key
        self.base_url = 'http://lapi.transitchicago.com/api/1.0/ttarrivals.aspx'
    def get_next_arrivals(self, station_id_list):
        """Fetch and accumulate arrival rows for every station id given."""
        next_arrivals = list()
        parameters = {'key': self.api_key}
        for station_id in station_id_list:
            # Reuse the dict, swapping in the current station id each pass.
            parameters['mapid'] = station_id
            response = requests.get(self.base_url, params=parameters)
            next_arrivals += self.parse_xml_response(response.content)
        return next_arrivals
    @staticmethod
    def parse_xml_response(xml_content):
        # Parse the arrivals XML into [station, route, stop, arrival-time] rows.
        next_arrivals = list()
        xml_tree = ElementTree.fromstring(xml_content)
        for node in xml_tree.iter('eta'):
            this_arrival = [None] * 4
            # NOTE(review): ``node._children`` is a private attribute that no
            # longer exists on Python 3's ElementTree; iterating the element
            # directly (``for child in node:``) is the supported API --
            # confirm the target Python version before relying on this.
            for child in node._children:
                if child.tag == 'staNm':
                    this_arrival[0] = child.text
                elif child.tag == 'rt':
                    this_arrival[1] = child.text
                elif child.tag == 'stpDe':
                    this_arrival[2] = child.text
                elif child.tag == 'arrT':
                    this_arrival[3] = child.text
            next_arrivals.append(this_arrival)
return next_arrivals | efaurie/cta-train-tracker | src/CTAInterface.py | CTAInterface.py | py | 1,311 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.fromstring",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 25,
"usage_type": "name"
}
] |
30419469650 | import requests
from bs4 import BeautifulSoup
from time import sleep
from lxml import html
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
" AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36"}
def get_url():
    """Yield detail-page URLs from every catalogue page (pagination step 20)."""
    for offset in range(0, 121, 20):
        page_url = f"http://www.spisszkol.eu/typ/?wojewodztwo=dolnoslaskie&powiat=wroclawski&start={offset}"
        resp = requests.get(page_url, headers=headers,)
        resp.encoding = "utf8"
        soup = BeautifulSoup(resp.text, "lxml")
        for entry in soup.find_all("div", class_="doc_entry"):
            yield entry.find("a").get("href")
def array():
    """Yield (name, address, phone, emails) for every school detail page."""
    for list_card in get_url():
        responce = requests.get(list_card, headers=headers)
        responce.encoding = "utf8"
        # Crude rate limiting between page fetches.
        sleep(3)
        soup = BeautifulSoup(responce.text, "lxml")
        # The page is parsed a second time with lxml to pull e-mail addresses
        # out of the anchor "title" attributes.
        parsed = html.fromstring(responce.text)
        emails = [e.attrib['title'] for e in parsed.xpath('//a[contains(@href, "email") and @title]')]
        s = " ".join(emails)
        data = soup.find("div", class_="page_body")
        name = data.find("p", class_="map_title red").text
        adres = data.find("p", itemprop="address").text
        try:
            telefon = data.find("span", itemprop="telephone").text
        except Exception as e:
            # Some cards have no phone entry; fall back to None.
            print(e)
            telefon = None
        yield name, adres, telefon,s
| bogdan-kurbanov/parcer | main.py | main.py | py | 1,468 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_nu... |
32425569605 | from irc.client import Event, ServerConnection
from timmy.db_access import settings
class AuthHandler:
    """Handles services authentication (e.g. NickServ) for the IRC bot."""

    def __init__(self):
        # Populated from the settings store by init().
        self.auth_on_welcome = False
        self.auth_type = ""
        self.auth_data = ""
        self.post_identify = ""
        self.post_auth_sent = False

    def init(self) -> None:
        """Load authentication settings from the database."""
        self.auth_on_welcome = settings.get_setting('auth_on_welcome') == "1"
        self.auth_type = settings.get_setting('auth_type')
        self.auth_data = settings.get_setting('auth_data')
        self.post_identify = settings.get_setting('post_identify')

    def on_welcome(self, connection: ServerConnection, event: Event) -> None:
        """Authenticate right after the server welcome, when configured."""
        if self.auth_on_welcome:
            self.handle_auth(connection)

    def handle_auth(self, connection: ServerConnection) -> None:
        """Send the identify command for the configured auth type."""
        if self.auth_type == 'nickserv':
            connection.privmsg('nickserv', 'IDENTIFY ' + self.auth_data)

    def on_umode(self, connection: ServerConnection, event: Event) -> None:
        """After our own user mode changes, run the post-identify command once."""
        if event.target != connection.nickname or self.post_auth_sent:
            return
        self.post_auth_sent = True
        if self.post_identify != '':
            connection.send_raw(self.post_identify)
| utoxin/TimTheWordWarBot | timmy/event_handlers/auth_handler.py | auth_handler.py | py | 1,205 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "timmy.db_access.settings.get_setting",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "timmy.db_access.settings",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "timmy.db_access.settings.get_setting",
"line_number": 16,
"usage_type": "ca... |
24337707923 | from lxml import html
import requests
from time import sleep
import json
import argparse
from collections import OrderedDict
from time import sleep
def parse(ticker):
    """Scrape Yahoo Finance summary and analyst-estimate tables for *ticker*.

    Returns an OrderedDict of label -> value pairs, or a dict with an
    "error" key on failure.
    """
    # Code to get the stock price
    # NOTE(review): verify=False disables TLS certificate checking.
    url = "http://finance.yahoo.com/quote/%s?p=%s" % (ticker, ticker)
    response = requests.get(url, verify=False)
    print("Parsing %s" % (url))
    parser = html.fromstring(response.text)
    summary_table = parser.xpath('//div[contains(@data-test,"summary-table")]//tr')
    summary_data = OrderedDict()
    # NOTE(review): bare except hides all failures, including typos.
    try:
        for table_data in summary_table:
            raw_table_key = table_data.xpath('.//td[contains(@class,"C(black)")]//text()')
            raw_table_value = table_data.xpath('.//td[contains(@class,"Ta(end)")]//text()')
            table_key = ''.join(raw_table_key).strip()
            table_value = ''.join(raw_table_value).strip()
            summary_data.update({table_key: table_value})
    except:
        print("Failed to parse json response")
        return {"error": "Failed to parse json response"}
    # code to get the current avg estimate, next avg estimate, and next 5 years growth estimate
    url = "http://finance.yahoo.com/quote/%s/analysis?p=%s" % (ticker, ticker)
    response = requests.get(url, verify=False)
    print("Parsing %s" % (url))
    parser = html.fromstring(response.text)
    summary_table = parser.xpath('//section[contains(@data-test,"qsp-analyst")]//tr')
    # summary_data = OrderedDict()
    try:
        for table_data in summary_table:
            raw_table_key = table_data.xpath('.//td[contains(@class,"Py(10px)")]//text()')
            raw_table_value = table_data.xpath('.//td[contains(@class,"Ta(end)")]//text()')
            table_key = ' '.join(raw_table_key).strip()
            table_value = ' '.join(raw_table_value).strip()
            summary_data.update({table_key: table_value})
        summary_data.update({'ticker': ticker})
        return summary_data
    except:
        print("Failed to parse json response")
        return {"error": "Failed to parse json response"}
def parseWiki():
    """Scrape the S&P 500 constituents table from Wikipedia.

    Returns an OrderedDict mapping a running index to each ticker symbol,
    or a dict with an "error" key on failure.
    """
    # code to get all stocks from wiki page
    url = "https://en.wikipedia.org/wiki/List_of_S%26P_500_companies"
    response = requests.get(url, verify=False)
    print("Parsing")
    parser = html.fromstring(response.text)
    summary_table = parser.xpath('//table[contains(@id,"constituents")]//tr')
    summary_data = OrderedDict()
    n = 0
    try:
        for table_data in summary_table:
            raw_table_value = table_data.xpath('.//a[contains(@class,"external text")]//text()')
            table_value = ' '.join(raw_table_value).strip()
            # Drop the trailing " reports" text that follows some symbols.
            x = table_value.split(" reports")
            # table_value = table_value[0:5]
            summary_data.update({n: x[0]})
            n = n + 1
        return summary_data
    except:
        print("Failed to parse json response")
        return {"error": "Failed to parse json response"}
def parseAll():
    """Scrape data for every S&P 500 ticker (skipping the header row)."""
    tickers = list(parseWiki().items())[1:]
    return [parse(symbol) for _, symbol in tickers]
if __name__ == "__main__":
"""argparser = argparse.ArgumentParser()
argparser.add_argument('ticker', help='')
args = argparser.parse_args()
ticker = args.ticker
print ("Fetching data for %s" % (ticker))"""
scraped_data = parseAll()
print ("Writing data to output file")
with open('%s-analysis.json', 'w') as fp:
json.dump(scraped_data, fp, indent=4)
f = open("done.txt", "w+")
for i in range(10):
f.write("This is line %d\r\n" % (i + 1))
f.close() | JackMcNally24/VODCA | python/Scripts/analysis.py | analysis.py | py | 3,562 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "lxml.html.fromstring",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",... |
18108561085 | # mypy: disable-error-code=arg-type
import asyncio
import discord
import validators
import wavelink
from discord import app_commands
from wavelink.ext import spotify
from .. import config
from ..client import CustomClient
def add_streaming_commands(client: CustomClient) -> None:
@client.tree.command(
name="youtube",
description="play audio from a YouTube video",
)
@app_commands.describe(query="search request or URL")
async def queue_youtube(
interaction: discord.Interaction, *, query: str
) -> None:
player = await ensure_voice_channel(interaction)
tracks = await wavelink.YouTubeTrack.search(query)
if not tracks:
await interaction.response.send_message(
f"No search results for query *{query}*"
)
track = tracks[0]
await player.queue.put_wait(track)
await interaction.response.send_message(f"**{track}** added to queue")
await start_playing(interaction, player)
@client.tree.command(
name="spotify",
description="play spotify tracks, playlists and albums from a URL",
)
@app_commands.describe(
url=(
"spotify URL "
"(https://open.spotify.com/track|album|playlist/...)"
)
)
async def queue_spotify(
interaction: discord.Interaction, *, url: str
) -> None:
player = await ensure_voice_channel(interaction)
decoded = spotify.decode_url(url)
if not validators.url(url) or decoded is None:
await interaction.response.send_message("Invalid URL provided")
return
if decoded["type"] in (
spotify.SpotifySearchType.album,
spotify.SpotifySearchType.playlist,
):
await interaction.response.send_message(
"Loading tracks into the queue"
)
tracks = spotify.SpotifyTrack.iterator(query=url)
if not player.is_playing():
first_item = await anext(tracks)
await player.play(first_item)
await interaction.followup.send(
f"Playing **{first_item.title}**"
)
async for track in tracks:
await player.queue.put_wait(track)
await interaction.followup.send(
f"**{track.title}** added to queue"
)
await interaction.followup.send(
f"Loading done. Items in queue: {len(player.queue)}"
)
elif decoded["type"] == spotify.SpotifySearchType.track:
tracks = await spotify.SpotifyTrack.search(url)
if not tracks:
await interaction.response.send_message(
f"No search results for *{url}*"
)
track = tracks[0]
await player.queue.put_wait(track)
await interaction.response.send_message(
f"**{track.title}** added to queue"
)
await start_playing(interaction, player)
@client.tree.command(name="outro", description="epic disconnect")
async def play_n_leave(interaction: discord.Interaction) -> None:
player = await ensure_voice_channel(interaction)
await interaction.response.send_message("It's time to go to sleep")
url = config.OUTRO_VIDEO["url"]
tracks = await wavelink.YouTubeTrack.search(url)
if not tracks:
await interaction.response.send_message(
f"Couldn't find the video, please verify that link is correct: '{url}'"
)
track = tracks[0]
await player.play(track)
while player.current == track and player.is_playing():
await asyncio.sleep(0.25)
if player.position >= config.OUTRO_VIDEO["timestamp_ms"]:
await player.disconnect()
await client.change_presence(status=discord.Status.idle)
async def ensure_voice_channel(
interaction: discord.Interaction,
) -> wavelink.Player:
if not interaction.user:
raise discord.DiscordException("interaction.user is None")
if not isinstance(interaction.user, discord.Member):
raise discord.DiscordException(
"interaction.user is not a discord.Member object"
)
author_voice = interaction.user.voice
if not author_voice:
await interaction.response.send_message(
"You're not in a voice channel"
)
raise discord.DiscordException("interaction.user.voice is None")
if not interaction.guild:
raise discord.DiscordException("interaction.guild is None")
player: wavelink.Player = interaction.guild.voice_client
if not player and author_voice.channel:
player = await author_voice.channel.connect(cls=wavelink.Player)
return player
if author_voice.channel != player.channel:
await interaction.response.send_message(
"You're in a different channel"
)
raise discord.DiscordException(
"user is in a different voice channel"
)
return player
async def start_playing(
    interaction: discord.Interaction, player: wavelink.Player
) -> None:
    """Begin playback of the next queued track when the player is idle.

    Does nothing if something is already playing or the queue is empty.
    """
    idle = not player.is_playing()
    has_tracks = not player.queue.is_empty
    if idle and has_tracks:
        track = player.queue.get()
        await player.play(track)
        await interaction.followup.send(f"Playing **{track.title}**")
| sasunday26/discord-music-bot | discord_music_bot/commands/streaming.py | streaming.py | py | 5,621 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "client.CustomClient",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "wavelink.YouTubeTrack.search",
"line_number": 24,
"usage_type": "call"
},
{
"api_nam... |
23856076875 | from selenium import webdriver
from lxml import etree
import time,re,random,csv
driver_path = r'D:\chromedriver\chromedriver.exe'
class BossSpider(object):
    """Selenium-driven crawler for Boss Zhipin python job listings (Beijing).

    Walks every catalog page, opens each posting in a new tab, scrapes the
    details and streams the rows through the supplied csv.DictWriter.
    """

    def __init__(self, writer):
        # writer: csv.DictWriter whose fieldnames match the dict built in
        # prase_detail_page.
        self.base_url = 'https://www.zhipin.com'
        self.url = 'https://www.zhipin.com/job_detail/?query=python&city=101010100&industry=&position='
        self.driver = webdriver.Chrome(executable_path=driver_path)
        self.writer = writer

    def run(self):
        """Crawl catalog pages until the 'next' button is disabled."""
        self.driver.get(url=self.url)
        while True:
            page_source = self.driver.page_source
            self.request_catalog_page(page_source=page_source)
            # Last pager link is the "next page" button.
            next_btn = self.driver.find_element_by_xpath("//div["
                "@class='page']//a[last()]")
            if "next disabled" in next_btn.get_attribute("class"):
                return
            else:
                next_btn.click()

    def request_catalog_page(self, page_source):
        """Extract every posting URL from a catalog page and visit it."""
        html = etree.HTML(page_source)
        urls = html.xpath("//div[@class='info-primary']//a/@href")
        for url in urls:
            url = self.base_url + url
            self.request_detail_page(url=url)
            # Random pause between postings to look less like a bot.
            time.sleep(random.randint(1, 3))

    def request_detail_page(self, url):
        """Open a posting in a new tab and hand the DOM to the parser."""
        self.driver.execute_script("window.open('%s')" % url)
        self.driver.switch_to.window(self.driver.window_handles[1])
        detail_page_source = self.driver.page_source
        detail_html = etree.HTML(detail_page_source)
        self.prase_detail_page(detail_html=detail_html)

    # NOTE(review): method name has a typo ("prase" -> "parse"); kept to
    # avoid breaking any external callers.
    def prase_detail_page(self, detail_html):
        """Scrape one posting, write the row, and close the detail tab."""
        all_datas = []
        job_status = detail_html.xpath("//div[@class='job-status']/text()")[0]
        job_name = detail_html.xpath("//div[@class='info-primary']//h1/text("
            ")")[0]
        salary = detail_html.xpath("//span[@class='salary']/text()")[0]
        # Strip all whitespace from the salary string.
        salary = re.sub(r"[\s]", "", salary)
        dates = detail_html.xpath("//div[@class='job-banner']//p/text()")
        city = dates[0]
        work_years = dates[1]
        education = dates[2]
        job_description = detail_html.xpath("//div[@class='job-sec']//div["
            "@class='text']//text()")
        # Drop newlines, bullet characters and non-breaking spaces.
        job_description = "".join(list(map(lambda x: re.sub(r"(\n|●| )", "", x),
            job_description)))
        company_introduction = detail_html.xpath("//div[@class='job-sec company-info']//div["
            "@class='text']//text()")
        company_introduction = "".join(list(map(lambda x: re.sub(r"(\n|●| )", "", x),
            company_introduction)))
        com = detail_html.xpath("//div[@class='job-sec company-info']//a/@href")
        if len(com):
            company_url = self.base_url + com[0]
        else:
            # Fall back to the site root when no company link exists.
            company_url = self.base_url
        work_address = detail_html.xpath("//div["
            "@class='location-address']/text()")[0]
        map_url = detail_html.xpath("//div["
            "@class='job-location']//img/@src")[0]
        contacts = detail_html.xpath("//h2[@class='name']/text()")
        if len(contacts):
            contacts = contacts[0]
        # NOTE(review): when no contact is found, `contacts` stays an empty
        # list rather than a string — confirm downstream consumers accept it.
        datas_dict = {
            'job_name': job_name,
            'job_status': job_status,
            'salary': salary,
            'city': city,
            'work_years': work_years,
            'education': education,
            'job_description': job_description,
            'company_introduction': company_introduction,
            'company_url': company_url,
            'work_address': work_address,
            'map_url': map_url,
            'contacts': contacts
        }
        all_datas.append(datas_dict)
        # Close the detail tab and return to the catalog tab.
        self.driver.close()
        self.driver.switch_to.window(self.driver.window_handles[0])
        self.writer.writerows(all_datas)
        print('保存成功!')  # ("saved successfully")
def writer_func():
    """Open (append mode) boss.csv and return a header-initialized DictWriter.

    NOTE: the file handle is intentionally left open so the returned writer
    stays usable; it is released when the process exits.
    """
    fieldnames = ['job_name', 'job_status', 'salary', 'city', 'work_years',
                  'education', 'job_description', 'company_introduction',
                  'company_url', 'work_address', 'map_url', 'contacts']
    out_file = open("boss.csv", 'a', newline='', encoding='utf-8')
    csv_writer = csv.DictWriter(out_file, fieldnames)
    csv_writer.writeheader()
    return csv_writer
if __name__ == '__main__':
    # Build the CSV writer first so scraped rows stream straight to disk,
    # then run the crawler over all catalog pages.
    writer = writer_func()
    bossspider = BossSpider(writer)
    bossspider.run()
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "lxml.etree.HTML",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "lxml.etr... |
8083603585 | from tqdm import tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from group4_banker import Group4Banker
from plot_config import setup, set_fig_size, set_arrowed_spines
setup()
def prep_data():
    """Load the credit train/test splits and one-hot encode categoricals.

    Returns (D_train, D_test, encoded_features, target) where the feature
    list excludes the target column and categorical columns are dummy-coded
    with the first level dropped.
    """
    features = ['checking account balance', 'duration', 'credit history',
                'purpose', 'amount', 'savings', 'employment', 'installment',
                'marital status', 'other debtors', 'residence time',
                'property', 'age', 'other installments', 'housing', 'credits',
                'job', 'persons', 'phone', 'foreign']
    target = 'repaid'
    column_names = features + [target]

    df_train = pd.read_csv("../../data/credit/D_train.csv", sep=' ', names=column_names)
    df_test = pd.read_csv("../../data/credit/D_test.csv", sep=' ', names=column_names)

    numerical_features = ['duration', 'age', 'residence time', 'installment',
                          'amount', 'persons', 'credits']
    quantitative_features = [f for f in features if f not in numerical_features]

    D_train = pd.get_dummies(df_train, columns=quantitative_features, drop_first=True)
    D_test = pd.get_dummies(df_test, columns=quantitative_features, drop_first=True)
    encoded_features = [c for c in D_train.columns if c != target]

    return D_train, D_test, encoded_features, target
def oob_sampling(model, X, y, n_samples, size_sample, seed):
    """Draw bootstrap samples and collect true labels vs. model actions.

    Returns two (n_samples, size_sample) arrays: the true labels shifted to
    {0, 1} and the model's best action per bootstrapped row.
    """
    np.random.seed(seed)
    truth = np.zeros((n_samples, size_sample))
    preds = np.zeros((n_samples, size_sample))
    for sample in tqdm(range(n_samples)):
        # Sample row indices with replacement.
        idx = np.random.choice(np.arange(X.shape[0]), replace=True, size=size_sample)
        truth[sample] = y.iloc[idx].values - 1
        preds[sample] = [int(model.get_best_action(row))
                         for _, row in X.iloc[idx].iterrows()]
    return truth, preds
def posterior_hypothesis(X, p):
    """Sequential posterior probability of the null hypothesis per column.

    X is an (n_samples, n_trials) array of 0/1 outcomes; p is the success
    probability under H0. For each column, a Beta(1, 1)-Bernoulli model is
    updated one row at a time and the running posterior
    P(H0 | x_1..x_j) is stored, so entry [j, i] reflects the first j+1
    observations of trial i.
    """
    n_samples, n_trials = np.shape(X)
    posterior = np.zeros_like(X, dtype=float)

    for col in range(n_trials):
        alpha, beta = 1, 1  # Beta(1, 1) prior for the alternative model
        log_null = 0.0      # running log-likelihood under H0
        log_marginal = 0.0  # running log marginal likelihood
        for row in range(n_samples):
            x = X[row, col]
            # Predictive probability of x under the current Beta posterior.
            marginal_x = (x * alpha + (1 - x) * beta) / (alpha + beta)
            alpha += x
            beta += 1 - x
            log_null += np.log(p)
            log_marginal += np.log(marginal_x)
            posterior[row, col] = np.exp(
                log_null - np.log(np.exp(log_marginal) + np.exp(log_null))
            )
    return posterior
def plot_posteriors(Y_true, Y_pred):
    """Plot posterior accept/reject probabilities of H0 and save them as PDFs.

    Y_true / Y_pred: (n_samples, size_sample) 0/1 arrays from oob_sampling.
    Produces posterior_null_hypo.pdf and reject_null_hypo.pdf.
    """
    delta = 0.05
    # H0: probability of NOT granting, with a Hoeffding-style confidence band.
    p_not_grant = 1 - np.mean(Y_true)
    p_not_grant_low = p_not_grant - np.sqrt(np.log(2) * delta / (2 * Y_true.shape[0]))
    p_not_grant_high = p_not_grant + np.sqrt(np.log(2) * delta / (2 * Y_true.shape[0]))
    # Posterior probability of H0 for data, predictions, and the band edges.
    post_grant_true = posterior_hypothesis(Y_true, p=p_not_grant)
    post_grant_pred = posterior_hypothesis(Y_pred, p=p_not_grant)
    post_grant_pred_low = posterior_hypothesis(Y_pred, p=p_not_grant_low)
    post_grant_pred_high = posterior_hypothesis(Y_pred, p=p_not_grant_high)

    # Figure 1: posterior probability of accepting H0, averaged over trials.
    fig, ax = plt.subplots(1, 1, figsize=set_fig_size(500, fraction=1))
    ax.set_title(r"Posterior probability of accepting $H_0$")
    ax.plot(np.mean(post_grant_true, axis=1), label="Test data", c="darkorange", alpha=0.7)
    ax.plot(np.mean(post_grant_pred, axis=1), label="Model predictions", alpha=0.7)
    # Shaded band from the low/high prior estimates.
    ax.fill_between(np.arange(Y_true.shape[0]),
                    np.mean(post_grant_pred_low, axis=1),
                    np.mean(post_grant_pred_high, axis=1), alpha=0.2)
    ax.legend()
    ax.set_ylabel("Posterior probability")
    ax.set_xlabel("Number of bootstrap samples")
    set_arrowed_spines(fig, ax)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.tight_layout()
    fig.savefig("posterior_null_hypo.pdf")

    # Fraction of trials whose posterior fell below the prior -> "reject H0".
    bayes_reject_grant_true = np.mean(post_grant_true < p_not_grant, axis=1)
    bayes_reject_grant_pred = np.mean(post_grant_pred < p_not_grant, axis=1)
    bayes_reject_grant_pred_low = np.mean(post_grant_pred_low < p_not_grant, axis=1)
    bayes_reject_grant_pred_high = np.mean(post_grant_pred_high < p_not_grant, axis=1)

    # Figure 2: posterior probability of rejecting H0.
    fig, ax = plt.subplots(1, 1, figsize=set_fig_size(500, fraction=1))
    ax.set_title(r"Posterior probability of rejecting $H_0$")
    ax.plot(bayes_reject_grant_true, label="Test data", c="darkorange", alpha=0.7)
    ax.plot(bayes_reject_grant_pred, label="Model predictions", alpha=0.7)
    ax.fill_between(np.arange(Y_true.shape[0]),
                    bayes_reject_grant_pred_low,
                    bayes_reject_grant_pred_high, alpha=0.2)
    ax.legend()
    ax.set_ylabel("Posterior probability")
    ax.set_xlabel("Number of bootstrap samples")
    set_arrowed_spines(fig, ax)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.tight_layout()
    fig.savefig("reject_null_hypo.pdf")
def main():
    """Train the banker model, bootstrap-sample its decisions, and plot."""
    D_train, D_test, encoded_features, target = prep_data()
    X_train = D_train.loc[:, encoded_features]
    y_train = D_train.loc[:, target]

    # Fixed-seed model with a 5% interest rate; no hyperparameter search.
    model = Group4Banker(optimize=False, random_state=42)
    model.set_interest_rate(0.05)
    model.fit(X_train, y_train)

    X_test = D_test.loc[:, encoded_features]
    y_test = D_test.loc[:, target]
    # 500 bootstrap samples of 50 rows each, seeded for reproducibility.
    Y_true, Y_pred = oob_sampling(model, X_test, y_test, 500, 50, 42)
    # Persist and immediately reload so the plotting step can also be rerun
    # later from the saved arrays alone.
    np.save("Y_true.npy", Y_true)
    np.save("Y_pred.npy", Y_pred)

    Y_true = np.load("Y_true.npy")
    Y_pred = np.load("Y_pred.npy")
    plot_posteriors(Y_true, Y_pred)


if __name__ == "__main__":
    main()
| moeennaqvi/BankCreditProject | src/action_sensitivity.py | action_sensitivity.py | py | 5,750 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "plot_config.setup",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies"... |
18785321963 | from django.urls import include, path
from rest_framework import routers
from . import views
# DRF router: exposes the registered viewsets under /api/.
router = routers.DefaultRouter()
router.register(r'reto', views.RetoViewSet)
router.register(r'jugador', views.JugadoresViewSet)
router.register(r'usuarios', views.UsuarioViewSet, basename='usuario')
router.register(r'partidas', views.PartidaViewSet, basename='partida')

# Reto
router.register(r'estudiantes', views.EstudianteViewSet)
router.register(r'juegos', views.JuegoViewSet)
router.register(r'intentos', views.IntentosViewSet)

urlpatterns = [
    # REST API endpoints (router-generated) and browsable-API auth.
    path('api/', include(router.urls)),
    path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    # Plain server-rendered views.
    path('', views.index, name='index'),
    path('procesamiento', views.procesamiento, name='procesamiento'),
    path('lista', views.lista, name='lista'),
    # Calculator operations.
    path('suma', views.suma, name='suma'),
    path('resta', views.resta, name='resta'),
    path('multiplicacion', views.multiplicacion, name='multiplicacion'),
    path('division', views.division, name='division'),
    # User CRUD helpers.
    path('usuario_lista', views.usuarios, name='usuarios'),
    path('usuario_pos', views.usuario_pos, name='usuario_pos'),
    path('usuario_del', views.usuario_del, name='usuario_del'),
    path('usuario_updt', views.usuario_updt, name='usuario_updt'),
    # Login flow.
    path('login', views.login, name='login'),
    path('procesologin', views.procesologin, name='procesologin'),
    path('valida_usuario', views.valida_usuario, name='valida_usuario'),
    # Charts.
    path('grafica', views.grafica, name='grafica'),
    path('barras', views.barras, name='barras'),
    path('gauge', views.gauge_chart, name='gauge'),
    path('datos_estudiantes', views.datos_estudiantes, name='datos_estudiantes'),
]
| Aram32mm/tarea1 | calculadora/urls.py | urls.py | py | 1,688 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_na... |
11785008616 | # NN ライブラリを使いやすいように変更
# 7763c160b4ec1caa99718cd3c865339227a1908e
import numpy as np
import matplotlib.pyplot as plt
def main():
    """Read (true, predicted) pairs from data/tmp_result.csv and draw a
    scatter plot of predictions against ground truth, annotated with the
    Pearson correlation coefficient."""
    true_x = []
    predict_x = []
    with open("data/tmp_result.csv") as fileobj:
        # Iterate the file directly instead of a manual readline() loop;
        # skip blank lines so a trailing newline does not crash float().
        for line in fileobj:
            line = line.rstrip()
            if not line:
                continue
            splitted = line.split(",")
            true_x.append(float(splitted[0]))
            predict_x.append(float(splitted[1]))

    # Pearson correlation between truth and prediction.
    coef = np.corrcoef(true_x, predict_x)[0, 1]
    print(coef)

    # Scatter plot with a y = x reference line.
    font_size = 15
    fig = plt.figure(figsize=(12, 6))

    xy_max = np.max(true_x + predict_x)
    xy_min = np.min(true_x + predict_x)
    fig.add_subplot(1, 2, 1)
    plt.plot([xy_min, xy_max], [xy_min, xy_max], color="orange")
    plt.scatter(true_x, predict_x)
    plt.title("xt (相関係数:"+f'{coef:.3f}'+")",
              fontname="MS Gothic", fontsize=font_size)
    plt.xlabel("xt の真値 [px]", fontname="MS Gothic", fontsize=font_size)
    plt.ylabel("xt の予測値 [px]", fontname="MS Gothic", fontsize=font_size)
    plt.grid(True)

    plt.show()


if __name__ == '__main__':
    main()
| eipuuuuk825/Puniki4 | python/src/plot_scatter.py | plot_scatter.py | py | 1,885 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.corrcoef",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "numpy.max",... |
2340324466 | import torch
from torch import nn
__all__ = [
'DotProductAttention'
, 'AdditiveAttention'
]
class Attention(nn.Module):
    """Base class for attention layers: validation and masking helpers."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__()
        # Score assigned to masked-out positions (effectively -inf for softmax).
        self.maskValue = -1e10

    def forward(self, *args, **kwargs):
        """Must be overridden by subclasses."""
        raise NotImplementedError

    def _shapeCheck(self, inputTensor: torch.Tensor, name: str, dim: int):
        """Raise TypeError unless *inputTensor* has exactly *dim* dimensions."""
        if len(inputTensor.shape) != dim:
            raise TypeError(f'{name} must be a {dim}D tensor but got shape of {inputTensor.shape}')

    def _equalCheck(self, val0, name0: str, val1, name1: str):
        """Raise ValueError unless val0 equals val1 (tensor-aware compare)."""
        if isinstance(val0, torch.Tensor):
            equal = torch.equal(val0, val1)
        else:
            equal = val0 == val1
        if not equal:
            raise ValueError(f'{name0} should be the same as {name1}, but got {val0} and {val1}')

    def _mask(self, score, mask):
        """Replace score entries where mask == 0 with self.maskValue (-1e10)."""
        self._equalCheck(score.shape, 'shape of score', mask.shape, 'shape of mask')
        return score.masked_fill(mask == 0, self.maskValue)


class DotProductAttention(Attention):
    """Scaled dot-product attention."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, queries: torch.Tensor, keys: torch.Tensor, values: torch.Tensor, mask=None):
        """Compute softmax(QK^T / sqrt(dk)) V.

        queries: (batchSize, n, dk) — query dim must equal key dim
        keys:    (batchSize, m, dk)
        values:  (batchSize, m, dv)
        mask:    optional (batchSize, n, m) 0/1 tensor; zeros are masked out
        returns: (batchSize, n, dv)
        """
        self._shapeCheck(queries, 'queries', dim=3)
        self._shapeCheck(keys, 'keys', dim=3)
        self._shapeCheck(values, 'values', dim=3)
        self._equalCheck(queries.shape[2], "query's length", keys.shape[2], "key's length")
        self._equalCheck(keys.shape[1], 'amount of keys', values.shape[1], 'amount of values')

        scale = keys.shape[2] ** 0.5
        scores = torch.bmm(queries, keys.transpose(1, 2)) / scale  # (b, n, m)
        if mask is not None:
            scores = self._mask(scores, mask)
        return torch.bmm(self.softmax(scores), values)
class AdditiveAttention(Attention):
    """Additive (Bahdanau-style) attention."""

    def __init__(self, querySize: int, keySize: int, numHidden: int, **kwargs) -> None:
        super().__init__(querySize, keySize, numHidden, **kwargs)
        # Project queries and keys into a shared hidden space.
        self.queryTransform = nn.Linear(querySize, numHidden)
        self.keyTransform = nn.Linear(keySize, numHidden)
        self.tanh = nn.Tanh()
        # Reduce each query-key coincidence vector to a scalar score.
        self.valueTransform = nn.Linear(numHidden, 1)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, queries: torch.Tensor, keys: torch.Tensor, values: torch.Tensor, mask=None):
        """Attention output for n queries.

        queries: (batchSize, n, dq)
        keys:    (batchSize, m, dk)
        values:  (batchSize, m, dv)
        mask:    optional (batchSize, n, m) 0/1 tensor
        returns: (batchSize, n, dv)
        """
        self._shapeCheck(queries, 'queries', dim=3)
        self._shapeCheck(keys, 'keys', dim=3)
        self._shapeCheck(values, 'values', dim=3)
        self._equalCheck(keys.shape[1], 'amount of keys', values.shape[1], 'amount of values')

        q = self.queryTransform(queries).unsqueeze(2)  # (b, n, 1, h)
        k = self.keyTransform(keys).unsqueeze(1)       # (b, 1, m, h)
        coincidence = self.tanh(q + k)                 # (b, n, m, h), broadcast
        scores = self.valueTransform(coincidence).squeeze(-1)  # (b, n, m)
        if mask is not None:
            scores = self._mask(scores, mask)
        weights = self.softmax(scores)                 # (b, n, m)
        return torch.bmm(weights, values)
class MutiHeadAttention(Attention):
    """Multi-head attention built on top of scaled dot-product attention."""

    def __init__(self, querySize: int, keySize: int, valueSize: int, numHidden: int, numHeads: int, **kwargs) -> None:
        super().__init__(querySize, keySize, valueSize, numHidden, numHeads, **kwargs)
        self.numHeads = numHeads
        self.attention = DotProductAttention(querySize, keySize, valueSize, numHidden, **kwargs)
        # Linear projections into the shared hidden space (creation order kept).
        self.queryTransform = nn.Linear(querySize, numHidden)
        self.keyTransform = nn.Linear(keySize, numHidden)
        self.valueTransform = nn.Linear(valueSize, numHidden)
        self.outputTransform = nn.Linear(numHidden, numHidden)

    def forward(self, queries: torch.Tensor, keys: torch.Tensor, values: torch.Tensor, mask=None):
        """Attention output for n queries.

        queries: (batchSize, n, dq)
        keys:    (batchSize, m, dk)
        values:  (batchSize, m, dv)
        mask:    optional (batchSize, n, m) 0/1 tensor
        returns: (batchSize, n, numHidden)
        """
        self._shapeCheck(queries, 'queries', dim=3)
        self._shapeCheck(keys, 'keys', dim=3)
        self._shapeCheck(values, 'values', dim=3)
        self._equalCheck(keys.shape[1], 'amount of keys', values.shape[1], 'amount of values')

        q = self.split(self.queryTransform(queries))  # (b*numHeads, n, h/numHeads)
        k = self.split(self.keyTransform(keys))       # (b*numHeads, m, h/numHeads)
        v = self.split(self.valueTransform(values))   # (b*numHeads, m, h/numHeads)
        if mask is not None:
            # Duplicate each batch's mask once per head along axis 0.
            mask = torch.repeat_interleave(mask, repeats=self.numHeads, dim=0)
        heads = self.attention(q, k, v, mask)         # (b*numHeads, n, h/numHeads)
        return self.outputTransform(self.concat(heads))  # (b, n, h)

    def split(self, X: torch.Tensor):
        """Reshape (b, n, d) -> (b*numHeads, n, d/numHeads)."""
        if X.shape[2] % self.numHeads != 0:
            raise ValueError(f'length of vector should be no. of heads\' multiple, but got {X.shape[2]} % numHeads != 0')
        batch, length, dim = X.shape
        per_head = dim // self.numHeads
        X = X.reshape(batch, length, self.numHeads, per_head).transpose(1, 2)
        return X.reshape(batch * self.numHeads, length, per_head)

    def concat(self, X: torch.Tensor):
        """Reshape (b*numHeads, n, d/numHeads) -> (b, n, d); inverse of split."""
        X = X.reshape(-1, self.numHeads, X.shape[1], X.shape[2]).transpose(1, 2)
        return X.reshape(X.shape[0], X.shape[1], -1)
if __name__ == '__main__':
    # Smoke test: run multi-head attention on random inputs.
    attension = MutiHeadAttention(128, 512, 256, 128, 8)
    queries = torch.randn((10, 100, 128))
    keys = torch.randn((10, 1024, 512))
    values = torch.randn((10, 1024, 256))
    # NOTE(review): torch.randint(0, 1, ...) yields all zeros (high bound is
    # exclusive), so every position is masked here — likely intended (0, 2).
    output = attension(queries, keys, values, torch.randint(0, 1, (queries.shape[0], queries.shape[1], keys.shape[1])))
    print(output.shape)
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"lin... |
10886136870 | from flask import Flask, render_template, request, Response, redirect, url_for , session , jsonify , flash
from flask_bootstrap import Bootstrap
from object_detection import *
import object_detection
from flask_sqlalchemy import SQLAlchemy # import sqlalchemy
from database import db , Vehicle , DB_Manager
import webbrowser
from threading import Timer #Debug Autostart
# Flask application setup.
application = Flask(__name__)
application.config.update(
    TESTING = True,
    # NOTE(review): hard-coded secret key — move to an environment variable
    # before deploying.
    SECRET_KEY = "password"
)
application.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db/vehicle_db.sqlite3' # Config to use sqlalchemy
application.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# NOTE(review): this rebinding shadows the `db` imported from database above.
db = SQLAlchemy(application)
db_manager = DB_Manager()
# Camera 0 is the default local webcam used for detection.
VIDEO = VideoStreaming(camera_src = 0)
@application.route('/')
def home():
    """Render the dashboard with empty plate/owner placeholders."""
    page_title = 'SecureV | Home'
    current_plate = ""
    current_owner = ""
    return render_template('index.html', TITLE=page_title , PLATE = current_plate, OWNER = current_owner)


@application.route('/update_plate' , methods=['POST'])
def updateplate():
    """AJAX endpoint: return the plate fragment for the latest detection."""
    current_plate = object_detection.current_plate
    db_manager.get_db_data() # Not sure
    try:
        current_owner = db_manager.search_owner(current_plate)
    except:
        # NOTE(review): bare except — any lookup failure shows "Not Verified".
        current_owner = "Not Verified"
    return jsonify('' , render_template('dynamic_plate.html', PLATE = current_plate , OWNER = current_owner))


@application.route('/update_gate' , methods=['POST'])
def updategate():
    """AJAX endpoint: return the gate-status fragment (currently fixed)."""
    # current_plate = object_detection.current_plate
    # current_g_status = False
    current_g_status = "Closed"
    return jsonify('' , render_template('dynamic_gate.html', GATE = current_g_status ))


@application.route('/video_feed')
def video_feed():
    '''
    Video streaming route.
    '''
    # Multipart MJPEG stream of frames from the camera.
    return Response(
        VIDEO.show(),
        mimetype='multipart/x-mixed-replace; boundary=frame'
    )


@application.route('/request_model_switch')
def request_model_switch():
    """Toggle the detection model on/off for the video stream."""
    #TODO: On toggle OFF turn current-plate to XXX-XXXX
    # if VIDEO.detect == True:
    #     # current_plate = "XXX-XXXX"
    #     object_detection.current_plate = "XXX-XXXX"
    VIDEO.detect = not VIDEO.detect
    try:
        print("This is a return function from VideoStreaming class " + str(VIDEO.lblret))
    except:
        pass
    return "nothing"
@application.route("/database" , methods=["POST","GET"])
def data_mode():
    """List registered vehicles; POST deletes a vehicle and its entries."""
    page_title = 'SecureV | Database Mode'
    db_man = DB_Manager()
    error = False
    error_message = ""
    try:
        if request.method =="POST":
            session.permanent = True
            car_to_delete = request.form["delete_plate_input"].upper()
            if car_to_delete:
                db_man.delete_car_and_all_entries(car_to_delete)
                flash(f"Deleted Vehicle: [{car_to_delete}] and all Entries ", "info")
            else:
                # NOTE(review): these flags are lost by the redirect below —
                # the error never reaches the template.
                error = True
                error_message = "Input Plate isn't registered or Input is empty"
            return redirect(url_for('data_mode'))
    except Exception as e:
        print(f"EXCEPTION AT /database route: {e}")
        error = True
        error_message = "Vehicle Plate is invalid or not existing"
    return render_template("data_mode.html" , db_data = db_man.db_data, TITLE=page_title , error = error , error_msg = error_message)


@application.route("/register", methods=["POST","GET"])
def register_mode():
    """Registration form; POST adds a vehicle (plate + owner) to the DB."""
    page_title = 'SecureV | Register Mode'
    error = False
    error_message = ""
    try:
        if request.method =="POST":
            session.permanent = True
            plate_input = request.form["plate_input"].upper()
            owner_input = request.form["owner_input"].title()
            if plate_input:
                current_registering = Vehicle(plate_num = plate_input , owner_name = owner_input)
                db.session.add(current_registering)
                db.session.commit()
                # Refresh the in-memory copy used by the dashboard.
                db_manager.get_db_data()
                flash(f"Successfully Registered [{plate_input}]", "info")
            else:
                error = True
                error_message = "Input Box Empty"
    except Exception as e:
        error = True
        error_message = "Error Input Box Value"
    return render_template("register_mode.html", TITLE=page_title , error = error , error_msg = error_message)


@application.route("/logs")
def log_mode():
    """Render the entry log table."""
    db_man = DB_Manager()
    page_title = 'SecureV | Log Mode'
    return render_template("log_mode.html", db_data = db_man.db_data_entries, TITLE=page_title)


def open_browser():
    ''' Debug autostartt'''
    webbrowser.open_new('http://127.0.0.1:2000/')


if __name__ == '__main__':
    # Timer(3, open_browser).start() # Auto open browser
    # db.create_all() # Create db when it doesnt exist
    application.run(port = 2000 , debug = True)
{
"api_name": "flask.Flask",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "database.db",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "database.DB_Manag... |
73034000353 | # -*- coding: utf-8 -*-
'''
Functions used for CLI argument handling
'''
from __future__ import absolute_import
# Import python libs
import re
import inspect
# Import salt libs
from salt.ext.six import string_types, integer_types
import salt.ext.six as six
#KWARG_REGEX = re.compile(r'^([^\d\W][\w.-]*)=(?!=)(.*)$', re.UNICODE) # python 3
# Matches "name=value" CLI arguments: an identifier-like name (not starting
# with a digit; dots/dashes allowed) followed by '=' (but not '==') and value.
KWARG_REGEX = re.compile(r'^([^\d\W][\w.-]*)=(?!=)(.*)$')
def condition_input(args, kwargs):
    '''
    Return a single arg structure for the publisher to safely use.

    Integer args are cast to strings so they survive serialization without
    truncation; kwargs (when non-empty) are appended as a single dict tagged
    with '__kwarg__': True.
    '''
    ret = []
    for arg in args:
        # `long` does not exist on Python 3 and raised NameError here;
        # integer_types (from salt.ext.six, imported at the top of this
        # module) covers int and, on Python 2, long.
        if isinstance(arg, integer_types):
            ret.append(str(arg))
        else:
            ret.append(arg)
    if isinstance(kwargs, dict) and kwargs:
        kw_ = {'__kwarg__': True}
        kw_.update(kwargs)
        return ret + [kw_]
    return ret
def parse_input(args, condition=True):
    '''
    Parse out the args and kwargs from a list of input values. Optionally,
    return the args and kwargs without passing them to condition_input().

    Don't pull args with key=val apart if it has a newline in it.
    '''
    _args = []
    _kwargs = {}
    for arg in args:
        if isinstance(arg, string_types):
            # Strings may be "key=value" kwargs; anything else is a
            # positional arg. Values go through yamlify_arg either way.
            arg_name, arg_value = parse_kwarg(arg)
            if arg_name:
                _kwargs[arg_name] = yamlify_arg(arg_value)
            else:
                _args.append(yamlify_arg(arg))
        elif isinstance(arg, dict):
            # Yes, we're popping this key off and adding it back if
            # condition_input is called below, but this is the only way to
            # gracefully handle both CLI and API input.
            if arg.pop('__kwarg__', False) is True:
                _kwargs.update(arg)
            else:
                _args.append(arg)
        else:
            # Non-string, non-dict args pass through unchanged.
            _args.append(arg)
    if condition:
        return condition_input(_args, _kwargs)
    return _args, _kwargs
def parse_kwarg(string_):
    '''
    Check *string_* for the CLI keyword-argument format
    "{argument name}={argument value}" (e.g. "my_message=Hello world").

    Returns a (name, value) tuple, or (None, None) when the string does not
    match.
    '''
    match = KWARG_REGEX.match(string_)
    if match is None:
        return None, None
    return match.groups()
def yamlify_arg(arg):
    '''
    yaml.safe_load the arg, with guards for values that YAML would
    mis-handle (empty strings, comments, underscore-digit strings, '|').
    Non-strings are returned unchanged.
    '''
    if not isinstance(arg, string_types):
        return arg

    if arg.strip() == '':
        # Because YAML loads empty strings as None, we return the original string
        # >>> import yaml
        # >>> yaml.load('') is None
        # True
        # >>> yaml.load(' ') is None
        # True
        return arg

    elif '_' in arg and all([x in '0123456789_' for x in arg.strip()]):
        # Digit strings with underscores (e.g. '1_2_3') would be parsed as
        # ints by YAML; keep them as strings.
        return arg

    try:
        # Explicit late import to avoid circular import. DO NOT MOVE THIS.
        import salt.utils.yamlloader as yamlloader

        # NOTE(review): if the import above fails, `original_arg` is unbound
        # and the except clause below raises NameError instead of returning.
        original_arg = arg
        if '#' in arg:
            # Only yamlify if it parses into a non-string type, to prevent
            # loss of content due to # as comment character
            parsed_arg = yamlloader.load(arg, Loader=yamlloader.SaltYamlSafeLoader)
            if isinstance(parsed_arg, string_types) or parsed_arg is None:
                return arg
            return parsed_arg
        if arg == 'None':
            arg = None
        else:
            arg = yamlloader.load(arg, Loader=yamlloader.SaltYamlSafeLoader)

        if isinstance(arg, dict):
            # dicts must be wrapped in curly braces
            if (isinstance(original_arg, string_types) and
                    not original_arg.startswith('{')):
                return original_arg
            else:
                return arg

        elif arg is None \
                or isinstance(arg, (list, float, integer_types, string_types)):
            # yaml.safe_load will load '|' as '', don't let it do that.
            if arg == '' and original_arg in ('|',):
                return original_arg
            # yaml.safe_load will treat '#' as a comment, so a value of '#'
            # will become None. Keep this value from being stomped as well.
            elif arg is None and original_arg.strip().startswith('#'):
                return original_arg
            else:
                return arg
        else:
            # we don't support this type
            return original_arg
    except Exception:
        # In case anything goes wrong...
        return original_arg
def get_function_argspec(func):
    '''
    A small wrapper around getargspec that also supports callable classes.

    For bound methods and callable instances, `self` is stripped from the
    reported argument list.
    '''
    if not callable(func):
        raise TypeError('{0} is not a callable'.format(func))

    # NOTE(review): inspect.getargspec was removed in Python 3.11; this
    # legacy py2/py3 module would need getfullargspec on modern Pythons.
    if inspect.isfunction(func):
        aspec = inspect.getargspec(func)
    elif inspect.ismethod(func):
        aspec = inspect.getargspec(func)
        del aspec.args[0]  # self
    elif isinstance(func, object):
        # NOTE(review): isinstance(func, object) is always True, so this
        # branch catches every remaining callable and the final else is
        # unreachable.
        aspec = inspect.getargspec(func.__call__)
        del aspec.args[0]  # self
    else:
        raise TypeError('Cannot inspect argument list for {0!r}'.format(func))

    return aspec
| shineforever/ops | salt/salt/utils/args.py | args.py | py | 5,142 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "salt.ext.six.iteritems",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "salt.ext.six",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "salt.ext.six.string_ty... |
31305593402 | from django.urls import path
from .views import (
ArticulosAgregarView, ArticulosApiView, ArticulosDetalladoView, ArticulosEliminarView,ArticuloActualizarView)
# CRUD API routes for Articulos; <pk> selects the record to act on.
urlpatterns = [
    path('api/', ArticulosApiView.as_view(), name="Api"),
    path('api/agregar/', ArticulosAgregarView.as_view(), name="create"),
    path('api/detallado/<pk>/', ArticulosDetalladoView.as_view(), name="detail"),
    path('api/eliminar/<pk>/', ArticulosEliminarView.as_view(), name="delete"),
    path('api/actualizar/<pk>/', ArticuloActualizarView.as_view(), name="update"),
]
| AlbaBermudez/ProyectoAPI | ProyectoAPI/appAPI/urls.py | urls.py | py | 575 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "views.ArticulosApiView.as_view",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "views.ArticulosApiView",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "d... |
30490609484 | import os
os.system('pip3 install lightgbm==2.1.2')
os.system('pip3 install hyperopt')
import pandas as pd
import pickle
import data_converter
import numpy as np
import scipy
from os.path import isfile
import random
import time
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import roc_auc_score
import sklearn
from multiprocessing import Pool
from preprocess import *
from boosting import *
from automl import *
module = ['read_data', 'preprocess', 'encode_cat', 'encode_mv', 'fit', 'predict']
class Model:
    """Incremental AutoML model for a batched (streaming) challenge.

    Keeps a sliding window of the most recent training batches, encodes
    categorical and multi-value features, and fits a gradient-boosting
    model whose boosting-round count is adapted to the remaining time
    budget.  Per-batch wall-clock time is tracked per pipeline module
    (see the module-level ``module`` list) to drive that adaptation.
    """
    def __init__(self, data_info, time_info):
        '''
        This constructor is supposed to initialize data members.
        Use triple quotes for function documentation.
        '''
        self.num_train_samples = 0
        self.num_feat = 1
        self.num_labels = 1
        self.is_trained = False
        self.batch_num = 0
        self.total_batch_num = 10
        # Number of past training batches kept in memory.
        self.batch_window_size = 3
        self.y_pred = None
        self.data_memory = []
        self.hyper_param = None
        self.missing_value_preprocess = MissingValuePreprocess(drop_ratio=0.9)
        # Wall-clock bookkeeping: one slot per batch, overall and per module.
        self.overall_time = np.zeros(10, dtype='float')
        self.module_time = {}
        for m in module:
            self.module_time[m] = np.zeros(10, dtype='float')

    def fit(self, F, y, data_info, time_info):
        """Ingest one labelled batch: preprocess (first batch only), store it
        in the sliding window, and log per-module timing."""
        # time budget score: budget normalised by sample count and a
        # weighted count of feature types (heavier weight for CAT/MV).
        if not self.is_trained:
            weight = [1., 1., 2., 4.]
            feat_weight = 0.0
            for i in range(4):
                feat_weight += weight[i] * data_info['loaded_feat_types'][i]
            self.budget_score = float(data_info['time_budget']) / (y.shape[0] * 10. / 1e6) / feat_weight
            self.time_manager = TimeManager(self.budget_score, self.batch_window_size)
            print ('budget score: %.2f' %(self.budget_score))
            # Only encode multi-value features when the budget allows it.
            self.use_mv = False
            if (data_info['loaded_feat_types'][3] != 0 and self.budget_score > 3.5):
                self.use_mv = True
        # read data (on later batches predict() already timed this)
        if not self.is_trained:
            self.module_time['read_data'][self.batch_num] = time.time() - time_info[1]
        y = y.ravel()
        # preprocessing; on later batches predict() has already
        # preprocessed F and stored it as self.F
        if not self.is_trained:
            module_start = time.time()
            F['numerical'] = self.missing_value_preprocess.fit_transform(F['numerical'], 'numerical', input_type='ndarray')
            F['CAT'] = self.missing_value_preprocess.fit_transform(F['CAT'], 'CAT', input_type='dataframe')
            if self.use_mv:
                F['MV'] = self.missing_value_preprocess.fit_transform(F['MV'], 'MV', input_type='dataframe')
            F['numerical'] = data_converter.replace_missing(F['numerical']).astype('float32')
            F['CAT'] = F['CAT'].fillna('-1')
            module_end = time.time()
            self.module_time['preprocess'][self.batch_num] = module_end - module_start
            self.F = F
        # store current batch of data (sliding window of fixed size)
        if (len(self.data_memory) == self.batch_window_size):
            del self.data_memory[0]
        self.data_memory.append([self.F, y])
        self.batch_end_time = time.time()
        if self.is_trained:
            self.batch_start_time = self.next_batch_start_time
        else:
            self.batch_start_time = time_info[1]
        self.overall_time[self.batch_num] = self.batch_end_time - self.batch_start_time
        print ('overall time spent on batch %d: %.2f seconds' %(self.batch_num, self.overall_time[self.batch_num]))
        for m in module:
            t = self.module_time[m][self.batch_num]
            ratio = t / self.overall_time[self.batch_num]
            print ('%s: %.2f seconds, %.2f%%' %(m, t, ratio * 100.))
        if self.is_trained:
            print ('time spent ratio: %.2f%%' %(self.time_spent_ratio))
        self.fit_end_time = time.time()
        self.next_batch_start_time = time.time()

    def transferPredict(self, i, data_info, time_info):
        """Train on stored batches ``i:`` and predict the current test batch.

        Encodes CAT/MV features jointly over train+test, fits a GBM with a
        boosting-round count tuned to the remaining time budget, and
        returns the predictions for ``self.F``.
        """
        train_num = np.concatenate([self.data_memory[j][0]['numerical'] for j in range(i, len(self.data_memory))], axis=0)
        train_cat = pd.concat([self.data_memory[j][0]['CAT'] for j in range(i, len(self.data_memory))], axis=0, ignore_index=True, copy=False)
        label = np.concatenate([self.data_memory[j][1] for j in range(i, len(self.data_memory))])
        test_num = self.F['numerical']
        test_cat = self.F['CAT']
        module_start = time.time()
        # encode categorical feature (train and test together so both share
        # the same encoding)
        all_cat = pd.concat([train_cat, test_cat], axis=0, ignore_index=True, copy=False)
        del train_cat, test_cat
        cat_columns = all_cat.columns.copy()
        with Pool(processes=2) as pool:
            cat_encode = pool.map(CATEncoder().fit_transform, [all_cat[[col]] for col in cat_columns])
            pool.close()
            pool.join()
        all_cat = pd.concat(cat_encode, axis=1, copy=False)
        del cat_encode
        train_cat, test_cat = all_cat.iloc[:label.shape[0], :].reset_index(drop=True), all_cat.iloc[label.shape[0]:, :].reset_index(drop=True)
        del all_cat
        module_end = time.time()
        self.module_time['encode_cat'][self.batch_num] = module_end - module_start
        train_num, test_num = pd.DataFrame(train_num), pd.DataFrame(test_num)
        train_feature = [train_num, train_cat]
        test_feature = [test_num, test_cat]
        # encode multi-value feature (only when the budget allows it)
        if self.use_mv:
            module_start = time.time()
            max_cat_num = 1000
            all_mv = pd.concat([self.data_memory[j][0]['MV'] for j in range(i, len(self.data_memory))], axis=0)
            all_mv = pd.concat([all_mv, self.F['MV']], axis=0)
            mv_columns = all_mv.columns.copy()
            with Pool(processes=2) as pool:
                mv_encode = pool.map(MVEncoder(max_cat_num=max_cat_num).fit_transform, [all_mv[col] for col in mv_columns])
                all_mv = pd.concat(mv_encode, axis=1)
                pool.close()
                pool.join()
            all_mv = all_mv.astype('int16')
            train_mv, test_mv = all_mv.iloc[:label.shape[0], :].reset_index(drop=True), all_mv.iloc[label.shape[0]:, :].reset_index(drop=True)
            del all_mv
            train_feature.append(train_mv)
            test_feature.append(test_mv)
            module_end = time.time()
            self.module_time['encode_mv'][self.batch_num] = module_end - module_start
        feature_new = pd.concat(train_feature, axis=1)
        for feat in train_feature:
            del feat
        del train_feature
        self.X_new = pd.concat(test_feature, axis=1)
        for feat in test_feature:
            del feat
        del test_feature
        weight = None
        # time spent on data/feature processing in current batch
        time_spent_invariant, time_spent_variant = 0., 0.
        for m in ['read_data', 'preprocess']:
            time_spent_invariant += self.module_time[m][self.batch_num]
        for m in ['encode_cat', 'encode_mv']:
            time_spent_variant += self.module_time[m][self.batch_num]
        # estimated time budget for each remaining batch
        time_left = data_info['time_budget'] - (self.predict_start_time - time_info[1])
        batch_left = self.total_batch_num - self.batch_num
        time_per_batch = time_left / batch_left
        # estimated time budget for training models (encoding cost grows
        # with the window, hence the scaling ratio)
        ratio = float(self.batch_window_size + 1) / (min(self.batch_num, self.batch_window_size) + 1)
        time_for_model = time_per_batch - time_spent_invariant - time_spent_variant * ratio
        clf = GBM(category_cols=None, hyper_tune=False, hyper_param=self.hyper_param)
        if (self.batch_num == 1):
            # decide boosting iterations on the first prediction batch
            self.suggested_boost_round = self.time_manager.suggest_boosting_round(feature_new, label, time_for_model, self.batch_num)
            self.suggested_learning_rate, self.early_stop_round = clf.suggest_learning_rate(feature_new, label, self.suggested_boost_round)
            self.num_boost_round = self.early_stop_round
            print ('max boost round suggested by time manager: %d' %(self.suggested_boost_round))
            print ('early stop round: %d' %(self.early_stop_round))
            print ('suggested learning rate: %f' %(self.suggested_learning_rate))
        else:
            # adjust boosting round number according to time budget
            ratio = min(self.batch_window_size, self.batch_num) / (min(self.batch_window_size, self.batch_num) - 1)
            estimated_train_time = self.module_time['fit'][self.batch_num - 1] * ratio
            remaining_time = time_for_model - estimated_train_time
            if (remaining_time > time_for_model * 0.1 and self.num_boost_round < 2000):
                self.num_boost_round += 200
            elif (remaining_time < 0.):
                self.num_boost_round -= 100
        print ('training info of batch %d: ' %(self.batch_num))
        print ('feature number: %d' %(feature_new.shape[1]))
        print ('train sample size: %d' %(feature_new.shape[0]))
        print ('test sample size: %d' %(self.X_new.shape[0]))
        print ('num boost round: %d' %(self.num_boost_round))
        print ('learning rate: %f' %(self.suggested_learning_rate))
        # LGB: fit with the adjusted round count / learning rate and predict
        module_start = time.time()
        updated_params = {'n_estimators': self.num_boost_round, 'learning_rate': self.suggested_learning_rate}
        self.hyper_param = clf.fit(feature_new, label, weight=weight, updated_params=updated_params)
        module_end = time.time()
        self.module_time['fit'][self.batch_num] = module_end - module_start
        module_start = time.time()
        y_pred = clf.predict(self.X_new)
        module_end = time.time()
        self.module_time['predict'][self.batch_num] = module_end - module_start
        self.time_spent_ratio = (time.time() - self.predict_start_time) / time_per_batch * 100.
        self.is_trained = True
        return y_pred

    def predict(self, F, data_info, time_info):
        '''
        This function should provide predictions of labels on (test) data.
        Here we just return random values...
        Make sure that the predicted values are in the correct format for the scoring
        metric. For example, binary classification problems often expect predictions
        in the form of a discriminant value (if the area under the ROC curve it the metric)
        rather that predictions of the class labels themselves.
        The function predict eventually can return probabilities or continuous values.
        '''
        self.batch_num += 1
        self.predict_start_time = time.time()
        self.module_time['read_data'][self.batch_num] = (self.predict_start_time - self.fit_end_time)
        module_start = time.time()
        F['numerical'] = self.missing_value_preprocess.transform(F['numerical'], 'numerical', input_type='ndarray')
        F['CAT'] = self.missing_value_preprocess.transform(F['CAT'], 'CAT', input_type='dataframe')
        if self.use_mv:
            F['MV'] = self.missing_value_preprocess.transform(F['MV'], 'MV', input_type='dataframe')
        F['numerical'] = data_converter.replace_missing(F['numerical']).astype('float32')
        F['CAT'] = F['CAT'].fillna('-1')
        module_end = time.time()
        self.module_time['preprocess'][self.batch_num] = module_end - module_start
        self.F = F
        # Train on every stored batch (index 0 onward) and predict this one.
        self.y_pred = self.transferPredict(0, data_info, time_info)
        self.predict_end_time = time.time()
        return self.y_pred

    def save(self, path="./"):
        """Pickle this model to ``<path>_model.pickle``.

        Fix: pickle streams are binary — the original opened the file in
        text mode ("w"), which raises TypeError on ``pickle.dump``; the
        file handle was also never closed.
        """
        with open(path + '_model.pickle', "wb") as f:
            pickle.dump(self, f)

    def load(self, path="./"):
        """Reload a pickled model from ``<path>_model.pickle`` if present.

        Fix: the file must be opened in binary mode ("rb") for
        ``pickle.load``; the original used the text-mode default.
        Returns the reloaded model, or ``self`` when no file exists.
        """
        modelfile = path + '_model.pickle'
        if isfile(modelfile):
            with open(modelfile, "rb") as f:
                self = pickle.load(f)
            print("Model reloaded from: " + modelfile)
        return self
return self | MetaLearners/NIPS-2018-AutoML-Challenge | src/model.py | model.py | py | 12,004 | python | en | code | 17 | github-code | 1 | [
{
"api_name": "os.system",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 50,
... |
22357222228 | #coding=utf-8
'''
#import urllib
#import urllib.request
#import urllib.parse
import requests
URL_IP = 'http://httpbin.org/ip'
URL_GET = 'http://httpbin.org/get'
def use_simple_requests():
response = requests.get(URL_IP)
print('>>>Response Headers:')
print(response.headers)
print('>>>Response Body:')
print(response.text)
def use_params_requests():
params = {'param1':'hello','param2':'你好','param3':'test'}
response = requests.get(URL_GET,params=params)
print('>>>Response Headers:')
print(response.headers)
print('>>>Status Code:')
print(response.status_code)
print('>>>Status Code reason:')
print(response.reason)
print('>>>Response Body:')
#print(response.json)
print(response.text)
if __name__ == '__main__':
print ('>>>Use simple urllib2:')
use_params_requests()
'''
'''
内置高阶函数
一:
def f(x):
return x*x
print map(f, [1, 2, 3, 4, 5, 6, 7, 8, 9])
二:
例如,编写一个f函数,接收x和y,返回x和y的和:
def f(x, y):
return x + y
调用 reduce(f, [1, 3, 5, 7, 9])时,reduce函数将做如下计算:
先计算头两个元素:f(1, 3),结果为4;
再把结果和第3个元素计算:f(4, 5),结果为9;
再把结果和第4个元素计算:f(9, 7),结果为16;
再把结果和第5个元素计算:f(16, 9),结果为25;
由于没有更多的元素了,计算结束,返回结果25。
三:
filter()函数是 Python 内置的另一个有用的高阶函数,filter()函数接收一个函数 f 和一个list,这个函数 f 的作用是对每个元素进行判断,返回 True或 False,filter()根据判断结果自动过滤掉不符合条件的元素,返回由符合条件元素组成的新list。
例如,要从一个list [1, 4, 6, 7, 9, 12, 17]中删除偶数,保留奇数,首先,要编写一个判断奇数的函数:
def is_odd(x):
return x % 2 == 1
然后,利用filter()过滤掉偶数:
filter(is_odd, [1, 4, 6, 7, 9, 12, 17])
结果:[1, 7, 9, 17]
'''
import json
import requests
from requests import exceptions
# Base endpoint of the GitHub REST API.
URL = 'https://api.github.com'

def build_url(endpoint):
    """Return the full API URL for *endpoint* under the GitHub base URL."""
    return '{0}/{1}'.format(URL, endpoint)
def better_print(json_str):
    """Re-serialize a JSON string with 4-space indentation for readability."""
    parsed = json.loads(json_str)
    return json.dumps(parsed, indent=4)
def request_method():
    """Fetch the authenticated user's e-mail addresses and pretty-print them."""
    resp = requests.get(build_url('user/emails'), auth=('imoocdemo', 'imoocdemo123'))
    print(better_print(resp.text))
def params_request():
    """List users with a query-string parameter and show request metadata."""
    query = {'since': 11}
    resp = requests.get(build_url('users'), params=query)
    print(better_print(resp.text))
    print(resp.request.headers)
    print(resp.url)
def json_request():
    """PATCH the authenticated user's profile with a JSON body and dump the exchange."""
    payload = {'name': 'baby'}
    resp = requests.patch(build_url('user'), auth=('imoocdemo', 'imoocdemo123'), json=payload)
    print(better_print(resp.text))
    print(resp.request.headers)
    print(resp.request.body)
    print(resp.status_code)
def timeout_request():
    """Fetch the e-mail endpoint with a 10-second timeout, reporting failures."""
    try:
        resp = requests.get(build_url('user/emails'), timeout=10)
        resp.raise_for_status()
    except (exceptions.Timeout, exceptions.HTTPError) as err:
        # Both failure modes are just printed, matching the demo's intent.
        print(err)
    else:
        print(resp.text)
        print(resp.status_code)
def hard_requests():
    """Build and send a request manually via Session/PreparedRequest."""
    from requests import Request, Session
    session = Session()
    headers = {'User-Agent': 'hys1.3.1'}
    request = Request('GET', build_url('user/emails'), auth=('imoocdemo', 'imoocdemo123'), headers=headers)
    prepared = request.prepare()
    # Inspect the prepared request before it goes on the wire.
    print(prepared.body)
    print(prepared.headers)
    resp = session.send(prepared, timeout=10)
    print(resp.status_code)
    print(resp.request.headers)
    print(resp.text)
def error_request():
    """Show the redirect history recorded when requesting the plain-HTTP host."""
    resp = requests.get('http://api.github.com')
    print(resp.history)
def download_image():
    """Stream an image from the web and write it to demo.jpg in 128-byte chunks."""
    URL = 'https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1508684596337&di=85cabd3af35d7b47e65de12719c7564a&imgtype=0&src=http%3A%2F%2Fpic.58pic.com%2F58pic%2F16%2F42%2F96%2F56e58PICAu9_1024.jpg'
    resp = requests.get(URL, stream=True)
    print(resp.status_code, resp.reason)
    # Accessing .content reads the whole body; iter_content then replays it.
    print(resp.content)
    with open('demo.jpg', 'wb') as out:
        for chunk in resp.iter_content(128):
            out.write(chunk)
def download_image_improved():
    """Download an image using an explicitly-closed streaming response.

    Fix: the original issued an extra ``requests.get(URL, stream=True)``
    whose response was never used or closed, leaking a connection; only
    the request inside the ``closing()`` context manager is needed.
    """
    URL = 'https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1508684596337&di=85cabd3af35d7b47e65de12719c7564a&imgtype=0&src=http%3A%2F%2Fpic.58pic.com%2F58pic%2F16%2F42%2F96%2F56e58PICAu9_1024.jpg'
    from contextlib import closing
    # closing() guarantees the streamed response is released on exit.
    with closing(requests.get(URL, stream=True)) as response:
        with open('demo1.jpg', 'wb') as fd:
            for chunk in response.iter_content(128):
                fd.write(chunk)
if __name__ == '__main__':
    # Demo entry point: run the improved (leak-free) streaming download.
    download_image_improved()
{
"api_name": "json.dumps",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": ... |
10514583312 | import datetime
import gym
import numpy as np
from gym import spaces, error
from gym import utils
from gym.utils import seeding
from entity.time_window import time_window
from tool.kuhn_munkras import kuhn_munkras
class MatchEnv(gym.Env):
    """Gym environment that matches arriving cars to load plans inside a
    sliding one-minute time window, using Kuhn-Munkras (Hungarian) matching."""

    def __init__(self, max_car_num=30, max_lp_num=300, time_district_num=12, max_time_window_length=10):
        # Upper bound (in minutes) on how long the window is allowed to grow.
        self.max_time_window_length = max_time_window_length
        # Action: an integer in [0, max_time_window_length); 0 means "do not match".
        self.action_spaces = spaces.Discrete(self.max_time_window_length)
        # Observation: [#cars, #sendable load plans, wait/district value, time component].
        self.observation_space = spaces.MultiDiscrete(
            [max_car_num, max_lp_num, max_time_window_length, time_district_num])
        # Shared module-level window instance (imported from entity.time_window).
        self.time_window = time_window
        self.matcher = kuhn_munkras(self.time_window)
    """
    #Description: match the left/right (car / load-plan) nodes inside the time window
    """
    def match(self):
        # Re-point the matcher at the current window contents and run KM.
        self.matcher.change_batch(self.time_window)
        reward, match_res = self.matcher.km()
        # Remove matched nodes; drop load plans that have now been sent.
        unbound_lp_list = self.time_window.node_clear(match_res)
        self.time_window.drop_sent_load_plan(unbound_lp_list)
        return reward

    def time_minus(self, front_time_str: str, rear_time_str: str) -> datetime.timedelta:
        """Parse two "%Y%m%d%H%M%S" timestamps and return rear - front."""
        front = datetime.datetime.strptime(front_time_str, "%Y%m%d%H%M%S")
        rear = datetime.datetime.strptime(rear_time_str, "%Y%m%d%H%M%S")
        tmp = rear - front
        return tmp

    @property
    def _get_obs(self):
        # Build the 4-element observation described in __init__.
        ob = np.zeros(4,dtype=int)
        time_str = self.time_window.time
        # NOTE(review): uses the module-level `time_window` here but
        # `self.time_window` elsewhere — same object, but inconsistent.
        ob[0] = len(time_window.car_list)
        ob[1] = len(time_window.can_be_sent_load_plan)
        # Hour-of-day mapped to a 2-hour district; float result truncates
        # when stored in the int array.
        ob[2] = int(self.time_window.time[8:10]) / 2
        if len(self.time_window.car_list) != 0:
            # Minutes the oldest car has been waiting (+1).
            first_car_time_Str = self.time_window.car_list[0].arrive_time
            time_delta = self.time_minus(first_car_time_Str, time_str)
            ob[3] = int(time_delta.seconds / 60) + 1
        else:
            ob[3] = 0
        return ob

    def step(self, a):
        reward = 0.0
        # Advance the simulated clock by one minute.
        self.time_window.get_next_min()
        ob = self._get_obs
        # NOTE(review): this compares the action against ob[2] (the time
        # district), while ob[3] holds the oldest car's wait — confirm the
        # intended index with the environment's author.
        if a <= ob[2] and a != 0:
            reward = self.match()
            ob = self._get_obs
        done = False
        # Episode ends when the window clock reaches its configured end time.
        if time_window.time >= time_window.end_time:
            done = True
        return ob, reward, done, {}

    def reset(self):
        # Restore the shared window to its initial state and return the
        # first observation.
        self.time_window.reset()
        return self._get_obs
| KirsVon/DQN-Master | DQN-master/match_env.py | match_env.py | py | 2,342 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gym.Env",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "gym.spaces.Discrete",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "gym.spaces",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "gym.spaces.MultiDiscrete"... |
24939797081 | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
import plotly.express as px
df = pd.read_csv("../datasets/Los_Angeles_International_Airport_-_Passenger_Traffic_By_Terminal.csv")
df['ReportPeriod']=pd.to_datetime(df['ReportPeriod'])
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
html.H1(children='ECS 272 Assignment 3'),
html.Div(children='''
Choose a terminal you are interested in.
'''),
dcc.Dropdown(
id = 'terminal_dropdown',
options = [
{'label': 'Imperial Terminal', 'value': 'Imperial Terminal'},
{'label': 'Misc. Terminal', 'value': 'Misc. Terminal'},
{'label': 'Terminal 1', 'value': 'Terminal 1'},
{'label': 'Terminal 2', 'value': 'Terminal 2'},
{'label': 'Terminal 3', 'value': 'Terminal 3'},
{'label': 'Terminal 4', 'value': 'Terminal 4'},
{'label': 'Terminal 5', 'value': 'Terminal 5'},
{'label': 'Terminal 6', 'value': 'Terminal 6'},
{'label': 'Terminal 7', 'value': 'Terminal 7'},
{'label': 'Terminal 8', 'value': 'Terminal 8'},
{'label': 'Tom Bradley International Terminal', 'value': 'Tom Bradley International Terminal'},
],
value = 'Imperial Terminal'
),
dcc.Graph(
id='basic_graph'
),
html.Div(children='''
Choose a year you are interested in.
'''),
dcc.Slider(
id = 'year_slider',
min = 2006,
max = 2019,
marks = {i: '{}'.format(i) for i in range(2006,2020)},
value = 2006
),
dcc.Graph(
id='adv_graph'
)
])
@app.callback(
    Output('basic_graph', 'figure'),
    [Input('terminal_dropdown', 'value')])
def update_basic_figure(terminal):
    """Bar chart of total domestic vs. international passengers at a terminal.

    Fix: the original used chained boolean indexing
    (``df[mask1][mask2]``), which relies on pandas implicitly reindexing
    the second mask and emits a UserWarning; the masks are now combined
    with ``&`` in a single ``.loc`` lookup.
    """
    def _total(kind):
        # Sum passenger counts for one traffic kind at the selected terminal.
        mask = (df.Domestic_International == kind) & (df.Terminal == str(terminal))
        return df.loc[mask, "Passenger_Count"].sum()

    figure = {
        'data': [
            {'x': ["Domestic", "International"],
             'y': [_total("Domestic"), _total("International")],
             'type': 'bar',
             'name': 'temp'},
        ],
        'layout': {
            'title': 'Basic graph view'
        }
    }
    return figure
@app.callback(
    Output('adv_graph', 'figure'),
    [Input('year_slider', 'value')])
def update_advanced_figure(year):
    """Parallel-categories chart of terminal / traffic type / direction for one year."""
    year_mask = df['ReportPeriod'].dt.year == year
    columns = ['Terminal', 'Domestic_International', 'Arrival_Departure']
    subset = df[year_mask][columns]
    return px.parallel_categories(subset, dimensions_max_cardinality=12)
if __name__ == '__main__':
    # Run the Dash development server (debug mode enables hot reload).
    app.run_server(debug=True)
| ucdavis/ECS272-Winter2020 | Assignment3/wyin/app.py | app.py | py | 2,973 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "dash.Dash",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div"... |
32109402966 | import sys
import os
import imghdr
from PyQt5.QtWidgets import QApplication, QMainWindow, QAction
from PyQt5.QtWidgets import QWidget, QDesktopWidget, QMessageBox
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QGridLayout
from PyQt5.QtWidgets import QGroupBox, QPushButton, QSlider
from PyQt5.QtWidgets import QLabel, QFileDialog, QSizePolicy, QSpacerItem
from PyQt5.QtGui import QIcon, QDrag, QPixmap, QImage
from PyQt5.QtCore import Qt, pyqtSignal
from timelapse_processing import ImageList, Image, loadImage, toRGB
"""
QApplication: manages application object.
QWidget: base class of all user interface objects. Receives events from the window system.
QMainWindow: main application window - framework to build the apps' user interface.
QDesktopWidget: provides access to user screen information.
"""
class DropButton(QPushButton):
    """Push-button that doubles as a drag-and-drop target for files.

    Emits ``itemDropped`` with the list of dropped local file paths and
    hides itself once a drop has been accepted.
    """
    itemDropped = pyqtSignal(list)

    def __init__(self, title, width, height, parent):
        super().__init__(title, parent)
        self.setVisible(True)
        self.setAcceptDrops(True)
        # Dashed grey outline over a faint blue fill marks the drop zone.
        self.setStyleSheet("background-color: rgba(85, 153, 255, 10%);"
                           "border-style: dashed;"
                           "border-width: 1px;"
                           "border-color: gray;"
                           "color: gray;")
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.setMinimumSize(width, height)

    def dragEnterEvent(self, event):
        """Accept the drag only when it carries URLs (file paths)."""
        if event.mimeData().hasUrls():
            event.accept()
        else:
            event.ignore()

    def dropEvent(self, event):
        """Emit the dropped local paths and hide the drop area."""
        mime = event.mimeData()
        if not mime.hasUrls():
            event.ignore()
            return
        event.accept()
        links = [url.toLocalFile() for url in mime.urls()]
        self.itemDropped.emit(links)
        self.setVisible(False)
class TimelapseApp(QMainWindow):
    """Main window of the timelapse exposure-fix tool.

    Images are dragged onto a DropButton, processed via the
    timelapse_processing module, shown side-by-side (original vs.
    processed) and can be saved to a user-chosen directory.
    """
    def __init__(self):
        super().__init__()
        self.title = 'Timelapse Exposure Fix'
        self.icon = 'camera_icon.png'
        self.left = 100
        self.top = 100
        # NOTE: these shadow QWidget.width()/height(); kept as plain ints here.
        self.width = 640
        self.height = 480
        self.minSize = 100
        self.imgScale = 0.45
        self.origImages = ImageList()
        self.processedImages = ImageList()
        self.imageFormat = ''
        self.initUI()

    def initUI(self):
        """Build the menu bar, central grid layout and status bar, then show."""
        # Menu bar
        mainMenu = self.menuBar()
        self.fileMenu = mainMenu.addMenu('File')
        reloadAct = QAction('New Session', self)
        reloadAct.setShortcut('Ctrl+R')
        reloadAct.triggered.connect(self.reloadSession)
        reloadAct.setStatusTip('Reload a new session for timelapse processing')
        self.fileMenu.addAction(reloadAct)
        saveAct = QAction('Save Images', self)
        saveAct.setShortcut('Ctrl+S')
        saveAct.triggered.connect(self.saveImages)
        saveAct.setStatusTip('Save processed images')
        self.fileMenu.addAction(saveAct)
        # Saving stays disabled until images have been processed.
        saveAct.setDisabled(True)
        exitAct = QAction('Exit', self)
        exitAct.setShortcut('Ctrl+Q')
        exitAct.triggered.connect(self.close)
        exitAct.setStatusTip('Exit timelapse processing tool')
        self.fileMenu.addAction(exitAct)
        helpMenu = mainMenu.addMenu('Help')
        aboutAct = QAction('Drag n Drop', self)
        aboutAct.triggered.connect(self.helpWindow)
        aboutAct.setStatusTip('Drag n Drop information')
        helpMenu.addAction(aboutAct)
        # Grid layout
        self.createGridLayout()
        self.dragndrop.itemDropped.connect(self.pictureDropped)
        # Status bar
        self.statusBar()
        self.statusBar().setStyleSheet("background-color: white;")
        # Window settings
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.setWindowIcon(QIcon(self.icon))
        self.center()
        self.show()

    def createGridLayout(self):
        """Assemble the drop area, the two-image viewer and the frame slider."""
        self.centralWidget = QWidget(self)
        self.centralWidget.setStyleSheet("background-color: white;")
        self.centralWidget.setStatusTip('Ready')
        self.setCentralWidget(self.centralWidget)
        self.sld = QSlider(Qt.Horizontal, self)
        mainLayout = QHBoxLayout()
        vLayout = QVBoxLayout()
        grid = QGridLayout()
        grid.setSpacing(25)
        grid.setContentsMargins(25, 25, 25, 25)
        self.dragndrop = DropButton('Drop images here', self.width - self.minSize, self.minSize, self)
        grid.addWidget(self.dragndrop, 0, 0, 1, 4)
        # Side-by-side viewer: img1 = original frame, img2 = processed frame.
        self.viewerGroupBox = QGroupBox('Time-lapse viewer')
        self.img1 = QLabel(self.centralWidget)
        self.img1.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.img1.setMinimumSize(self.minSize, self.minSize)
        self.img2 = QLabel(self.centralWidget)
        self.img2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.img2.setMinimumSize(self.minSize, self.minSize)
        imgviewer = QHBoxLayout()
        leftSpacer = QSpacerItem(self.minSize/10, self.minSize, QSizePolicy.Expanding, QSizePolicy.Minimum)
        imgviewer.addSpacerItem(leftSpacer)
        imgviewer.addWidget(self.img1)
        imgviewer.addWidget(self.img2)
        rightSpacer = QSpacerItem(self.minSize/10, self.minSize, QSizePolicy.Expanding, QSizePolicy.Minimum)
        imgviewer.addSpacerItem(rightSpacer)
        self.viewerGroupBox.setLayout(imgviewer)
        self.viewerGroupBox.setVisible(False)
        grid.addWidget(self.viewerGroupBox, 2, 1)
        grid.addWidget(self.sld, 3, 1)
        # Slider scrubs through frames; hidden until images are loaded.
        self.sld.setValue(0)
        self.sld.setRange(0,0)
        self.sld.valueChanged.connect(self.updateViewerIndex)
        self.sld.setVisible(False)
        # Encapsulate grid layout in VBox and HBox
        vLayout.addLayout(grid)
        verticalSpacer = QSpacerItem(self.minSize, 20, QSizePolicy.Minimum, QSizePolicy.Expanding)
        vLayout.addItem(verticalSpacer)
        hLeftSpacer = QSpacerItem(self.minSize/10, self.minSize, QSizePolicy.Expanding, QSizePolicy.Minimum)
        mainLayout.addItem(hLeftSpacer)
        mainLayout.addLayout(vLayout)
        hRightSpacer = QSpacerItem(self.minSize/10, self.minSize, QSizePolicy.Expanding, QSizePolicy.Minimum)
        mainLayout.addItem(hRightSpacer)
        self.centralWidget.setLayout(mainLayout)

    def updateViewerIndex(self):
        """Slider callback: show the frame at the slider's current value."""
        self.updateViewer(self.sld.value())

    def updateViewer(self, imageNumber):
        """Render frame *imageNumber* (original and processed) into the viewer."""
        if len(self.origImages) > 0:
            rgb = toRGB(self.origImages[imageNumber].img)
            qimage = QImage(rgb, rgb.shape[1], rgb.shape[0], QImage.Format_RGB888)
            pixmap1 = QPixmap(qimage)
            pixmap1 = pixmap1.scaledToWidth(self.width * self.imgScale)
            self.img1.setPixmap(pixmap1)
        if len(self.processedImages) > 0:
            rgb = toRGB(self.processedImages[imageNumber].img)
            qimage = QImage(rgb, rgb.shape[1], rgb.shape[0], QImage.Format_RGB888)
            pixmap2 = QPixmap(qimage)
            pixmap2 = pixmap2.scaledToWidth(self.width * self.imgScale)
            self.img2.setPixmap(pixmap2)

    def pictureDropped(self, links):
        """Load the dropped image paths, run exposure fixing, enable saving."""
        self.statusBar().showMessage('Processing...')
        # Detect the image format from the first file (imghdr is deprecated
        # in Python 3.11+; fine on the Python versions this app targets).
        self.imageFormat = imghdr.what(links[0])
        [self.origImages.append(Image(loadImage(link))) for link in links]
        newImages = ImageList(self.origImages[:])
        newImages.computeStats()
        newImages.fixExposure()
        self.processedImages = newImages
        # Enable the 'Save Images' menu action now that output exists.
        self.fileMenu.actions()[1].setDisabled(False)
        self.updateViewer(0)
        self.sld.setRange(0, len(self.origImages) - 1)
        self.sld.setValue(0)
        self.statusBar().showMessage('Ready')
        self.viewerGroupBox.setVisible(True)
        self.sld.setVisible(True)

    def saveImages(self):
        """Write processed frames into a 'processed-images' folder the user picks.

        Fix: QFileDialog.getExistingDirectory returns '' when the dialog
        is cancelled; previously that produced the root-level path
        '/processed-images'. We now bail out on cancel.
        """
        self.statusBar().showMessage('Saving Images...')
        newDir = '/processed-images'
        baseDir = QFileDialog.getExistingDirectory(self, "Select Directory")
        if not baseDir:
            self.statusBar().showMessage('Ready')
            return
        destDir = baseDir + newDir
        if not os.path.exists(destDir):
            os.makedirs(destDir)
        for i, obj in enumerate(self.processedImages):
            rgb = toRGB(obj.img)
            qimage = QImage(rgb, rgb.shape[1], rgb.shape[0], QImage.Format_RGB888)
            qimage.save(destDir + '/processed_image' + str(i+1).zfill(4) + '.' + self.imageFormat)
        self.statusBar().showMessage('Ready')

    def reloadSession(self):
        """Clear all loaded/processed images after user confirmation."""
        mboxtitle = 'Warning'
        mboxmsg = 'Are you sure you want to start a new session?\nAll unsaved changes will be lost.'
        reply = QMessageBox.warning(self, mboxtitle, mboxmsg,
                                    QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            del self.origImages[:]
            del self.processedImages[:]
            self.img1.clear()
            self.img2.clear()
            self.sld.setValue(0)
            self.sld.setRange(0,0)
            self.dragndrop.setVisible(True)
            self.viewerGroupBox.setVisible(False)
            self.sld.setVisible(False)

    def helpWindow(self):
        """Show an informational dialog about drag-and-drop ordering."""
        mboxtitle = 'Help'
        mboxmsg = ('If your time-lapse is not displaying in the proper order, or there seems to be a jump '
                   'cut to previous frames, make sure the last item clicked when the images were dragged was '
                   'the first image of your sequence. The order of selection is preserved during drag and drop.')
        QMessageBox.information(self, mboxtitle, mboxmsg,
                                QMessageBox.Ok, QMessageBox.Ok)

    def center(self):
        """Move the window to the centre of the available screen area."""
        qtRectangle = self.frameGeometry()
        centerPoint = QDesktopWidget().availableGeometry().center()
        qtRectangle.moveCenter(centerPoint)
        self.move(qtRectangle.topLeft())

    def closeEvent(self, event):
        """Confirm before closing the application."""
        mboxtitle = 'Message'
        mboxmsg = 'Are you sure you want to quit?'
        reply = QMessageBox.warning(self, mboxtitle, mboxmsg,
                                    QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()
if __name__ == '__main__':
    # Create the Qt application, show the main window and enter the event loop.
    app = QApplication(sys.argv)
    w = TimelapseApp()
    sys.exit(app.exec_())
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QSizePolicy.Expanding",
"line_number": 35,
"usage_type": "attribute"
... |
73948619555 | import matplotlib.patches as patches
import matplotlib.pyplot as plt
path = [
[.1, .3],
[.2, .9],
[.8, .4],
]
fig = plt.figure()
ax = fig.gca()
ax.add_patch(patches.Polygon(path))
fig.savefig("triangle_patch.png", dpi=150)
plt.close()
path = [
[.1, .3],
[.2, .9],
[.8, .4],
]
fig = plt.figure()
ax = fig.gca()
ax.add_patch(patches.Polygon(
path,
alpha=.7,
edgecolor="darkblue",
facecolor="red",
hatch="+",
joinstyle="miter",
linestyle="--",
linewidth=5,
))
fig.savefig("fancy_patch.png", dpi=150)
plt.close()
| brohrer/taming_matplotlib | patch_examples.py | patch_examples.py | py | 552 | python | en | code | 22 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "matplotlib.patches.Polygon",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": ... |
14404366448 | #!/usr/bin/env python3
#python crawl.py --iocp http://www.website.com/
import asyncio
import logging
import re
import signal
import os
import sys
import urllib.parse
import aiohttp
@asyncio.coroutine
def download(url,data):
filename=url.replace('http://','').replace('https://','')
path=filename.split('/')
path='/'.join(path[0:-1])
if not os.path.exists(path):os.makedirs(path)
if os.path.exists(filename) or os.path.exists(filename+".html"):return
if not filename.endswith('.html'):filename += 'index.html'
with open(filename,'w') as f:
f.write(data)
class Crawler:
def __init__(self, rooturl, loop, maxtasks=100):
self.rooturl = rooturl
self.loop = loop
self.todo = set()
self.busy = set()
self.done = {}
self.tasks = set()
self.sem = asyncio.Semaphore(maxtasks, loop=loop)
# connector stores cookies between requests and uses connection pool
self.session = aiohttp.ClientSession(loop=loop)
@asyncio.coroutine
def run(self):
t = asyncio.ensure_future(self.addurls([(self.rooturl, '')]),
loop=self.loop)
yield from asyncio.sleep(1, loop=self.loop)
while self.busy:
yield from asyncio.sleep(1, loop=self.loop)
yield from t
yield from self.session.close()
self.loop.stop()
@asyncio.coroutine
def addurls(self, urls):
# print(urls)
for url, parenturl in urls:
url = urllib.parse.urljoin(parenturl, url)
url, _ = urllib.parse.urldefrag(url)
if (
# url.startswith(self.rooturl) and
url not in self.busy and
url not in self.done and
url not in self.todo):
self.todo.add(url)
yield from self.sem.acquire()
task = asyncio.ensure_future(self.process(url), loop=self.loop)
task.add_done_callback(lambda t: self.sem.release())
task.add_done_callback(self.tasks.remove)
self.tasks.add(task)
@asyncio.coroutine
def process(self, url):
print('processing:', url)
self.todo.remove(url)
self.busy.add(url)
try:
resp = yield from self.session.get(url)
except Exception as exc:
print('...', url, 'has error', repr(str(exc)))
self.done[url] = False
else:
if (resp.status == 200 and
('text/html' in resp.headers.get('content-type'))):
data = (yield from resp.read()).decode('utf-8', 'replace')
# print(url)
yield download(url,data)
urls = re.findall(r'(?i)href=["\']?([^\s"\'<>]+)', data)
# print(urls)
asyncio.Task(self.addurls([(u, url) for u in urls]))
resp.close()
self.done[url] = True
self.busy.remove(url)
print(len(self.done), 'completed tasks,', len(self.tasks),
'still pending, todo', len(self.todo))
def main():
loop = asyncio.get_event_loop()
c = Crawler(sys.argv[1], loop)
asyncio.ensure_future(c.run(), loop=loop)
try:
loop.add_signal_handler(signal.SIGINT, loop.stop)
except RuntimeError:
pass
loop.run_forever()
print('todo:', len(c.todo))
print('busy:', len(c.busy))
print('done:', len(c.done), '; ok:', sum(c.done.values()))
print('tasks:', len(c.tasks))
if __name__ == '__main__':
    # Optional Windows-only mode: '--iocp' switches to the IOCP proactor loop.
    if '--iocp' in sys.argv:
        from asyncio import events, windows_events
        sys.argv.remove('--iocp')
        logging.info('using iocp')
        el = windows_events.ProactorEventLoop()
        events.set_event_loop(el)
    main()
| wurui1994/record | Python/Spider/asyncio_crawl.py | asyncio_crawl.py | py | 3,835 | python | en | code | 29 | github-code | 1 | [
{
"api_name": "os.path.exists",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_nu... |
42758230743 | import keras
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
# Fix the RNG seed so the fold split and reported scores are reproducible.
np.random.seed(7)
# Pima Indians diabetes dataset: 8 numeric features + 1 binary label per row.
data_file="pima-indians-diabetes.csv"
dataset=np.loadtxt(data_file,encoding='utf-8',delimiter=',')
X=dataset[:,0:8]
y=dataset[:,8]
# 10-fold stratified cross-validation: each fold preserves the class ratio.
kfold=StratifiedKFold(n_splits=10,shuffle=True,random_state=7)
cvscores=[]
for train,validation in kfold.split(X,y):
    # Build a fresh model for every fold so folds do not share weights.
    model = Sequential()
    model.add(Dense(12, input_dim=8, activation='relu'))
    model.add(Dense(6, activation='relu'))
    # Final sigmoid unit outputs the probability of diabetes.
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # BUG FIX: train only on the training fold. The original called
    # model.fit(X, y, ...), training on the FULL dataset (validation fold
    # included), which leaks the held-out data and inflates CV accuracy.
    model.fit(X[train], y[train], epochs=150, batch_size=10, verbose=0)
    # Evaluate on the held-out fold only.
    scores = model.evaluate(X[validation], y[validation],verbose=0)
    print('\n%s : %.2f%%' % (model.metrics_names[1], scores[1] * 100))
    cvscores.append(scores[1]*100)
# Report mean and standard deviation of the per-fold accuracies.
print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores),np.std(cvscores)))
| renxingkai/KerasDLDemo | chapter05_Evaluate/KFold.py | KFold.py | py | 1,367 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.random.seed",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.loadtxt",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selec... |
43673538115 | from flask import jsonify, make_response, Blueprint, abort, redirect, request
from atexit import register
from time import sleep
from threading import Thread
from flasgger import swag_from
import utils
import logging
import json
bp = Blueprint('db', __name__)
search_q = utils.RedisQueue('searched')
expand_q = utils.RedisQueue('expanded')
def on_startup():
    """Start one daemon thread per queue that tops the queue up from CouchDB.

    Each thread repeatedly reads un-worked users from the view matching the
    queue's name, marks them with state 'queue' in the DB, and pushes the
    successfully-updated docs (with their new _rev) onto the Redis queue.
    """
    def acquire_user(q, threshold):
        while True:
            try:
                if q.qsize() < threshold:
                    # View key range [[False] .. [False, {}]] selects docs
                    # whose queue field is still False (not yet worked).
                    rows = db.view(f'tree/{q.name}', limit=threshold, reduce=False,
                                   include_docs=True)[[False]:[False, {}]].rows
                    users = []
                    for row in rows:
                        user = row.doc
                        user[q.name] = 'queue'
                        users.append(user)
                    if users: # found docs: mark and enqueue them
                        results = db.update(users)
                        logging.warning(
                            f'Put {sum([r[0] for r in results])} into {q.name} queue ...')
                        for user, (suc, uid, rev) in zip(users, results):
                            if suc:
                                # Carry the fresh revision so later updates don't conflict.
                                user['_rev'] = rev
                                q.put(user)
                    else:
                        sleep(5) # nothing pending: back off before next fetch
                else:
                    sleep(1)
            except Exception as e:
                # Keep the refill thread alive across transient DB errors.
                logging.error(repr(e))
                sleep(1)
    Thread(target=acquire_user, daemon=True, args=(search_q, 200,)).start()
    Thread(target=acquire_user, daemon=True, args=(expand_q, 10,)).start()
@register
def on_exit():
    """atexit hook: drain both worker queues back into CouchDB so users
    checked out into a queue are not lost when the process shuts down."""
    for q in [search_q, expand_q]:
        users = []
        while not q.empty():
            user = json.loads(q.get())
            # BUG FIX: reset the field that matches THIS queue's name
            # ('searched' or 'expanded'). The original always reset
            # 'searched', so users drained from the expand queue kept
            # 'expanded' == 'queue' forever and were never re-fetched.
            user[q.name] = False
            users.append(user)
        result = db.update(users)
        logging.warning(
            f'Put back {sum([r[0] for r in result])} users from {q.name} queue to db.')
db = utils.db(name='users')
on_startup()
@bp.route('/monitor/<level>/')
@swag_from('docs/db_monitor.yml')
def monitor(level):
    """Report the document count and the searched-view rollup grouped at
    the given level. NOTE(review): `level` arrives as a str from the URL —
    presumably the couchdb client accepts it; confirm it needn't be int."""
    search = []
    for item in db.view('tree/searched', group_level=level):
        search.append({str(item.key): item.value})
    return {
        '# of Docs': utils.db().info()['doc_count'],
        'Search': search
    }
@bp.route('/next_search')
@swag_from('docs/next_search.yml')
def next_search():
    """Pop the next user from the search queue; empty dict when none is pending."""
    pending = search_q.get_nowait()
    return pending or {}
@bp.route('/next_expand')
@swag_from('docs/next_expand.yml')
def next_expand():
    """Pop the next user from the expand queue; empty dict when none is pending."""
    pending = expand_q.get_nowait()
    return pending or {}
@bp.route('/queue_stat')
@swag_from('docs/q_stat.yml')
def q_stat():
    """Report how many items are pending in each worker queue."""
    return dict(search=search_q.qsize(), expand=expand_q.qsize())
@bp.route('/active_tasks')
def active_tasks():
    # Proxy straight to CouchDB's /_active_tasks endpoint.
    return redirect(f'{utils.base()}/_active_tasks')
| Taylorrrr/COMP90024-2020S1-Team22 | backend/db.py | db.py | py | 2,910 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "utils.RedisQueue",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "utils.RedisQueue",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.warning",
... |
70323927393 | # -*- coding: utf-8 -*-
""" Autoruns
2015 fightnight
2022 bittor7x0"""
import xbmc,xbmcvfs,xbmcaddon,xbmcgui,xbmcplugin,urllib.request,urllib.parse,urllib.error,os,re,sys
import xml.etree.ElementTree as ET
SERVICE_DISABLED = 'Autoruns_service_disabled'
def list_addons():
    """Build the plugin directory: one entry per installed add-on that
    declares an xbmc.service extension, labelled (on) or gold (off)."""
    # Info/header entry at the top of the list.
    addDir('[COLOR blue][B]%s[/B][/COLOR]' % (translate(30001)),'None',None,xbmcaddon.Addon().getAddonInfo('icon'))
    # Enumerate every installed add-on directory.
    pathofaddons = xbmcvfs.translatePath('special://home/addons')
    listofaddons = os.listdir(pathofaddons)
    for individual_addon in listofaddons:
        path_to_addon = os.path.join(pathofaddons, individual_addon)
        addon_xml_path=os.path.join(path_to_addon,'addon.xml')
        # Skip directories that are not add-ons (no addon.xml).
        if os.path.exists(addon_xml_path):
            xml_content=openfile(addon_xml_path)
            # Read the icon path from addon.xml instead of xbmcaddon.Addon(id):
            # creating an Addon object for an installed-but-disabled add-on
            # raises "Unknown addon id".
            root = ET.fromstring(xml_content)
            xml_icon = root.findall("./extension/assets/icon")
            addon_icon_path = os.path.join(path_to_addon,xml_icon[0].text if xml_icon else 'icon.png')
            if re.search(SERVICE_DISABLED,xml_content):
                # Service element commented out by change_state(): shown as off.
                addDir('[B][COLOR gold]%s[/B] (off)[/COLOR]' % (individual_addon),path_to_addon,1,addon_icon_path)
            elif re.search('point="xbmc.service"',xml_content):
                # Active service extension: shown as on.
                addDir('%s (on)' % (individual_addon),path_to_addon,1,addon_icon_path)
            else:
                # Add-on declares no service at all: not listed.
                pass
def change_state(name,path):
    """Toggle an add-on's autostart by (un)commenting the xbmc.service
    extension inside its addon.xml, then refresh the listing.

    :param name: display label from list_addons(); 'COLOR gold' marks an
        entry currently off.
    :param path: filesystem path of the add-on directory.
    """
    addon_xml_path=os.path.join(path,'addon.xml')
    content=openfile(addon_xml_path)
    if re.search('COLOR gold',name):
        # off -> on: strip the SERVICE_DISABLED comment markers added below.
        content=content.replace('<!--%s ' % (SERVICE_DISABLED),'').replace(' %s-->' % (SERVICE_DISABLED),'')
    else:
        # on -> off: wrap the service element in a marked XML comment so it
        # can be recognised and restored later.
        root = ET.fromstring(content)
        parent = root.findall("./extension[@point='xbmc.service']/..")[0]
        child = parent.findall("./extension[@point='xbmc.service']")[0]
        commented = '%s \n%s %s' % (SERVICE_DISABLED, ET.tostring(child).decode("utf-8"), SERVICE_DISABLED)
        parent.remove(child)
        parent.append(ET.Comment(commented))
        # NOTE(review): this branch produces bytes while the other keeps the
        # str from openfile(); presumably xbmcvfs accepts both — confirm.
        content=ET.tostring(root, encoding='utf8', method='xml')
    savefile(addon_xml_path,content)
    xbmc.executebuiltin("Container.Refresh")
def openfile(path_to_the_file):
    """Return the contents of *path_to_the_file* read via xbmcvfs,
    or None when the file cannot be opened or read."""
    try:
        fh = xbmcvfs.File(path_to_the_file, 'rb')
        try:
            return fh.read()
        finally:
            # Close the handle even if read() raises.
            fh.close()
    except Exception:
        # BUG FIX: the original handler printed the undefined name
        # `filename`, raising a NameError instead of reporting the path.
        # Also narrowed the bare `except:` to Exception.
        print("Wont open: %s" % path_to_the_file)
        return None
def savefile(path_to_the_file,content):
    """Write *content* to *path_to_the_file* via xbmcvfs; failures are
    logged to stdout instead of raising."""
    try:
        fh = xbmcvfs.File(path_to_the_file, 'wb')
        try:
            fh.write(content)
        finally:
            # Close the handle even if write() raises.
            fh.close()
    except Exception:
        # BUG FIX: the original handler printed the undefined name
        # `filename`, raising a NameError instead of reporting the path.
        print("Wont save: %s" % path_to_the_file)
def addDir(name,path,mode,iconimage):
    """Add one entry to the plugin directory listing.

    The entry's URL re-encodes path/mode/name as plugin query parameters,
    which get_params() parses back on the next invocation.
    """
    listItem = xbmcgui.ListItem(label=name)
    listItem.setArt({'icon': "DefaultFolder.png"})
    listItem.setArt({'thumb': iconimage})
    return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url="%s?path=%s&mode=%s&name=%s" % (sys.argv[0],urllib.parse.quote_plus(path),mode,urllib.parse.quote_plus(name)),listitem=listItem,isFolder=False)
def get_params():
    """Parse the plugin query string from sys.argv[2] into a dict.

    Returns {} -> actually an empty list [] when the query string is
    shorter than 2 characters (preserved quirk of the original); otherwise
    a dict of key/value pairs split on '&' and '='.
    """
    query = sys.argv[2]
    if len(query) < 2:
        return []
    cleaned = query.replace('?', '')
    parsed = {}
    for pair in cleaned.split('&'):
        parts = pair.split('=')
        # Ignore malformed fragments that are not exactly key=value.
        if len(parts) == 2:
            parsed[parts[0]] = parts[1]
    return parsed
def translate(text):
    # Look up a localized string from this add-on's language files by id.
    return xbmcaddon.Addon().getLocalizedString(text)
# Entry point: decode the plugin invocation parameters and dispatch.
params=get_params()
path=None
name=None
mode=None
# Missing parameters are deliberately tolerated (first invocation has none).
try: path=urllib.parse.unquote_plus(params["path"])
except: pass
try: name=urllib.parse.unquote_plus(params["name"])
except: pass
try: mode=int(params["mode"])
except: pass
# No mode -> show the add-on list; mode 1 -> toggle the selected add-on.
if mode==None: list_addons()
elif mode==1: change_state(name,path)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
| bittor7x0/kodi.script.autoruns | default.py | default.py | py | 4,990 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "xbmcaddon.Addon",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "xbmcvfs.translatePath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"... |
42469206289 | import threading
import numpy as np
import cv2
import time
class CameraReader(threading.Thread):
    """Background thread that continuously grabs frames from a cv2 camera.

    Use as a context manager: entering opens the capture device and starts
    the grab loop; exiting stops the loop and releases the device.
    """
    def __init__(self, dev):
        self.dev = dev
        self.lock = threading.Lock()
        # Latest grabbed frame; read() hands out copies of this buffer.
        self.frame = np.empty((480, 640, 3), dtype=np.uint8)
        self.running = False
        self.cap = None
        threading.Thread.__init__(self)
    def run(self):
        # Keep refreshing self.frame until stop() clears the flag.
        while self.running:
            with self.lock:
                grabbed, _ = self.cap.read(self.frame)
            time.sleep(0.01)
    def __enter__(self):
        self.cap = cv2.VideoCapture(self.dev)
        self.running = True
        self.start()
        return self
    def __exit__(self, exception_type, exception_value, traceback):
        self.stop()
        if self.cap is not None:
            self.cap.release()
    def read(self, frame=None):
        """Return the latest frame: a fresh copy, or copied into *frame*."""
        with self.lock:
            if frame is None:
                return self.frame.copy()
            np.copyto(frame, self.frame)
            return frame
    def stop(self):
        self.running = False
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
class ROSCameraReader(object):
    """Context manager that mirrors the latest image from a ROS image topic.

    Entering subscribes to the topic; exiting unregisters the subscriber.
    """
    def __init__(self, topic):
        self.topic = topic
        self.sub = None
        self.bridge = CvBridge()
        # Last frame received; stays black until the first message arrives.
        self.img = np.zeros((480,640,3), dtype=np.uint8)
    def __enter__(self):
        self.sub = rospy.Subscriber(self.topic, Image, self.img_cb)
        return self
    def __exit__(self,a,b,c):
        if self.sub is not None:
            self.sub.unregister()
            self.sub = None
    def img_cb(self,msg):
        # Convert the incoming sensor_msgs/Image to a BGR numpy array.
        self.img = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
    def read(self,frame):
        """Copy the most recent image into *frame* and return it.

        BUG FIX: now returns *frame* (the original returned None), making
        this interchangeable with CameraReader.read() above.
        """
        np.copyto(frame, self.img)
        return frame
| olinrobotics/irl | irl_auxiliary_features/irl_stereo/scripts/stereo_utils/camera_reader.py | camera_reader.py | py | 1,780 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "threading.Thread",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "threading.Lock",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"lin... |
39674304363 | from pymongo import MongoClient
import numpy as np
import datetime
client = MongoClient()
db = client.twitter
# Group tweets by (year, month, day, hour) and count them per bucket.
cursor = db.tweets.aggregate(
    [{
        "$project": {
            "y": { "$year": "$timestamp_obj"},
            "m": { "$month": "$timestamp_obj"},
            "d": { "$dayOfMonth": "$timestamp_obj" },
            "h": { "$hour": "$timestamp_obj" }
        }
    },
    {
        "$group": {
            "_id": { "year": "$y", "month": "$m", "day": "$d", "hour": "$h" },
            "count": { "$sum": 1 }
        }
    },
    {
        "$sort": {
            "_id.year": -1,
            "_id.month": 1,
            "_id.day": 1,
            "_id.hour": 1
        }
    }]
)
hours = [[] for i in range(24)]   # counts bucketed by hour-of-day
counts = {}                       # datetime -> tweet count for that hour
earliest, latest = None, None
for doc in cursor:
    d = doc['_id']
    time = datetime.datetime(int(d['year']), int(d['month']), int(d['day']), int(d['hour']))
    counts[time] = doc['count']
    if earliest is None or time < earliest: earliest = time
    if latest is None or time > latest: latest = time
    hours[int(d['hour'])].append(int(doc['count']))
    print(str(doc).encode('utf-8'))
# Average count per hour-of-day. BUG FIX: guard empty buckets — the
# original raised ZeroDivisionError when an hour had no tweets at all.
for i in range(len(hours)):
    hours[i] = sum(hours[i]) / len(hours[i]) if hours[i] else 0
# Build a dense hourly series from earliest to latest, gaps filled with 0.
starttime = earliest
sortedcounts = []
sortedhours = []
while not starttime >= latest:
    try:
        sortedcounts.append(counts[starttime])
        sortedhours.append(hours[starttime.hour])
    except KeyError:
        sortedcounts.append(0)
        sortedhours.append(0)
    starttime += datetime.timedelta(hours=1)
N = len(sortedcounts)
# sample spacing
T = 1.0 / 100.0
yf = np.fft.fft(sortedcounts)  # FFT of the hourly counts
# BUG FIX: N/2 is a float under Python 3; np.linspace's `num` and array
# slice indices must be integers, so use floor division.
xf = np.linspace(0.0, 1.0/(2.0*T), N//2)
yf[0] = 0  # drop the DC component
import matplotlib.pyplot as plt
fig, ax = plt.subplots(4, 1)
ax[0].plot(sortedcounts)
ax[0].set_xlabel('Time')
ax[0].set_ylabel('Count')
ax[1].plot(xf, 2.0/N * np.abs(yf[:N//2]))  # full spectrum
print(yf)
# Zero every bin except 27 (and the last bin, which the original's loop
# bounds leave untouched) to isolate that periodic component.
for i in range(0, 27):
    yf[i] = 0
for i in range(28, len(yf)-1):
    yf[i] = 0
ax[2].plot(xf, 2.0/N * np.abs(yf[:N//2]), 'rx')  # filtered spectrum
ixf = np.fft.ifft(yf)
# Plot the real part: ifft returns complex values and matplotlib would
# otherwise discard the imaginary part with a ComplexWarning.
ax[3].plot(ixf.real)
ax[3].plot(sortedcounts)
ax[3].plot(sortedhours)
ax[3].set_xlabel('Time')
ax[3].set_ylabel('Count')
plt.show()
| Humpheh/twied | scripts/processing/fourier.py | fourier.py | py | 2,223 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.fft.fft... |
3824690503 | # -*- coding: utf-8 -*-
from ..enums import ScanTypesEnum
from ..errors import ClosedProcess
from ..process import AbstractProcess
from .functions import (
get_memory_regions,
read_process_memory,
search_addresses_by_value,
search_values_by_addresses,
write_process_memory
)
from typing import Generator, Optional, Sequence, Tuple, Type, TypeVar, Union
T = TypeVar("T")
class LinuxProcess(AbstractProcess):
    """
    Class to open a Linux process for reading and writing memory.
    """
    def __init__(
        self,
        *,
        window_title: Optional[str] = None,
        process_name: Optional[str] = None,
        pid: Optional[int] = None,
        **kwargs
    ):
        """
        :param window_title: window title of the target program.
        :param process_name: name of the target process.
        :param pid: process ID.
        """
        super().__init__(
            window_title = window_title,
            process_name = process_name,
            pid = pid
        )
        # Once set by close(), every public method raises ClosedProcess.
        self.__closed = False
    def close(self) -> bool:
        """
        Close the process handle.
        Marks this instance closed; subsequent method calls raise
        ClosedProcess. Always returns True.
        """
        self.__closed = True
        return True
    def get_memory_regions(self) -> Generator[dict, None, None]:
        """
        Generates dictionaries with the address, size and other
        information of each memory region used by the process.
        """
        if self.__closed: raise ClosedProcess()
        return get_memory_regions(self.pid)
    def read_process_memory(
        self,
        address: int,
        pytype: Type[T],
        bufflength: int
    ) -> T:
        """
        Return a value from a memory address.
        :param address: target memory address (ex: 0x006A9EC0).
        :param pytype: type of the value to be received (bool, int, float, str or bytes).
        :param bufflength: value size in bytes (1, 2, 4, 8).
        """
        if self.__closed: raise ClosedProcess()
        return read_process_memory(self.pid, address, pytype, bufflength)
    def search_by_addresses(
        self,
        pytype: Type[T],
        bufflength: int,
        addresses: Sequence[int],
        *,
        raise_error: bool = False,
    ) -> Generator[Tuple[int, Optional[T]], None, None]:
        """
        Search the whole memory space, accessible to the process,
        for the provided list of addresses, returning their values.
        """
        if self.__closed: raise ClosedProcess()
        return search_values_by_addresses(self.pid, pytype, bufflength, addresses, raise_error=raise_error)
    def search_by_value(
        self,
        pytype: Type[T],
        bufflength: int,
        value: Union[bool, int, float, str, bytes],
        scan_type: ScanTypesEnum = ScanTypesEnum.EXACT_VALUE,
        *,
        progress_information: bool = False,
        writeable_only: bool = False,
    ) -> Generator[Union[int, Tuple[int, dict]], None, None]:
        """
        Search the whole memory space, accessible to the process,
        for the provided value, returning the found addresses.
        :param pytype: type of value to be queried (bool, int, float, str or bytes).
        :param bufflength: value size in bytes (1, 2, 4, 8).
        :param value: value to be queried (bool, int, float, str or bytes).
        :param scan_type: the way to compare the values.
        :param progress_information: if True, a dictionary with the progress information will be return.
        :param writeable_only: if True, search only at writeable memory regions.
        """
        if self.__closed: raise ClosedProcess()
        # Range scans have a dedicated entry point with explicit bounds.
        if scan_type in [ScanTypesEnum.VALUE_BETWEEN, ScanTypesEnum.NOT_VALUE_BETWEEN]:
            raise ValueError("Use the method search_by_value_between(...) to search within a range of values.")
        return search_addresses_by_value(self.pid, pytype, bufflength, value, scan_type, progress_information, writeable_only)
    def search_by_value_between(
        self,
        pytype: Type[T],
        bufflength: int,
        start: Union[bool, int, float, str, bytes],
        end: Union[bool, int, float, str, bytes],
        *,
        not_between: bool = False,
        progress_information: bool = False,
        writeable_only: bool = False,
    ) -> Generator[Union[int, Tuple[int, dict]], None, None]:
        """
        Search the whole memory space, accessible to the process,
        for a value within the provided range, returning the found addresses.
        :param pytype: type of value to be queried (bool, int, float, str or bytes).
        :param bufflength: value size in bytes (1, 2, 4, 8).
        :param start: minimum inclusive value to be queried (bool, int, float, str or bytes).
        :param end: maximum inclusive value to be queried (bool, int, float, str or bytes).
        :param not_between: if True, return only addresses of values that are NOT within the range.
        :param progress_information: if True, a dictionary with the progress information will be return.
        :param writeable_only: if True, search only at writeable memory regions.
        """
        if self.__closed: raise ClosedProcess()
        scan_type = ScanTypesEnum.NOT_VALUE_BETWEEN if not_between else ScanTypesEnum.VALUE_BETWEEN
        return search_addresses_by_value(self.pid, pytype, bufflength, (start, end), scan_type, progress_information, writeable_only)
    def write_process_memory(
        self,
        address: int,
        pytype: Type[T],
        bufflength: int,
        value: Union[bool, int, float, str, bytes]
    ) -> T:
        """
        Write a value to a memory address.
        :param address: target memory address (ex: 0x006A9EC0).
        :param pytype: type of value to be written into memory (bool, int, float, str or bytes).
        :param bufflength: value size in bytes (1, 2, 4, 8).
        :param value: value to be written (bool, int, float, str or bytes).
        """
        if self.__closed: raise ClosedProcess()
        return write_process_memory(self.pid, address, pytype, bufflength, value)
| JeanExtreme002/PyMemoryEditor | PyMemoryEditor/linux/process.py | process.py | py | 6,073 | python | en | code | 20 | github-code | 1 | [
{
"api_name": "typing.TypeVar",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "process.AbstractProcess",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "typing.Optiona... |
19918807345 | import numpy as np
import matplotlib.pyplot as plt
# 1. Linear regression for classifying noisy data
fig, axs = plt.subplots(3)
N = 100 # data set size
d = 2 # input dimensionality (features x1, x2); column 0 is the bias term
# Generate random training data in [-1, 1]^2 with a leading bias column of 1s.
X = np.random.uniform(-1, 1, size=(N, d+1))
X[:, 0] = 1
# Random target weight vector defining the true separating line.
w = np.random.uniform(-1, 1, size=(d+1))
# Compute true labels for the training data
Y = np.sign(np.dot(X, w))
# Add noise: flip the label of one randomly chosen point.
n = np.random.randint(0, 100, 1)
Y[n[0]] = Y[n[0]]*-1
ind_pos = np.where(Y == 1)[0] # positive examples
ind_neg = np.where(Y == -1)[0] # negative examples
# Plot points
axs[0].plot(X[ind_pos, 1], X[ind_pos, 2], 'ro') # red dot points
axs[0].plot(X[ind_neg, 1], X[ind_neg, 2], 'bx') # blue 'x' points
# Target function line: x2 = -(w1/w2) x1 - w0/w2
X2 = (-w[1]/w[2]*X) - w[0]/w[2]
line_x = np.linspace(-1, 1, 100)
# Plot target function
axs[0].plot(X, X2, label='Target fxn', color='yellow')
# Perceptron algorithm (with optional "pocket" variant)
def perceptron(X, Y, pocket=False):
    """Run 1000 iterations of the perceptron learning algorithm.

    :param X: (n, d+1) data matrix whose first column is the bias term 1.
    :param Y: (n,) vector of +/-1 labels.
    :param pocket: if True, keep and return the best weights seen so far
        (pocket algorithm) instead of the final weights.
    :return: (weights, eins) where eins records the in-sample error after
        each iteration (running best error when pocket=True).
    """
    # Generalized: sample count and dimension come from X instead of the
    # hard-coded 100 and the module-level global `d`.
    n_samples, n_features = X.shape
    eins = []
    best_ein = float('inf')
    w_train = np.random.uniform(-1, 1, size=n_features)
    # BUG FIX: copy the weights. The original stored a reference, so the
    # "pocket" weights silently tracked every later in-place `+=` update
    # and the pocket algorithm degenerated into plain PLA.
    best_w = w_train.copy()
    for _ in range(1000):
        Y1 = np.sign(np.dot(X, w_train))
        # Pick a random point; update the hypothesis if it is misclassified.
        j = np.random.randint(n_samples)
        if Y1[j] != Y[j]:
            w_train += X[j] * Y[j]
        # Fraction of misclassified points under the current hypothesis.
        ein = np.count_nonzero(Y1 - Y) / n_samples
        if pocket and ein < best_ein:
            best_w = w_train.copy()
            best_ein = ein
        eins.append(best_ein if pocket else ein)
    return best_w if pocket else w_train, eins
# Run PLA and plot its final hypothesis plus per-iteration error.
l = np.linspace(0, 1000, 1000)
result = perceptron(X, Y, pocket=False)
g = result[0]
# Hypothesis line: x2 = -(g0/g2) - (g1/g2) x1
g_x = -(g[0]/g[2]) + (-(g[1]*X)/g[2])
ein_pla = result[1]
axs[0].plot(X, g_x, label='PLA final hypothesis', color='orange')
axs[1].plot(l, ein_pla, label='Ein PLA', color='green')
# Run the Pocket algorithm and plot its hypothesis and error.
result2 = perceptron(X, Y, pocket=True)
g2 = result2[0]
g_x2 = -(g2[0]/g2[2]) + (-(g2[1]*X)/g2[2])
ein_pocket = result2[1]
axs[0].plot(X, g_x2, label='Pocket final hypothesis', color='blue')
axs[2].plot(l, ein_pocket, label='Ein Pocket', color='red')
# Linear regression via the pseudo-inverse (eqn 3.4): w = (X^T X)^-1 X^T y
Xdag = np.matmul(np.linalg.pinv(np.matmul(X.transpose(), X)), X.transpose())
w = np.matmul(Xdag, Y)
# Expanded in-sample squared error: (w^T X^T X w - 2 w^T X^T y + y^T y) / N
wTx = np.matmul(w, X.transpose())
xw = np.matmul(X, w)
p1 = np.matmul(wTx, xw)
p2 = np.multiply(np.matmul(wTx, Y), 2)
p3 = np.matmul(Y.transpose(), Y)
ein_lin_reg = np.add(np.subtract(p1, p2), p3)/100
print(ein_lin_reg)
# Plot the regression decision boundary.
lin_reg = (-(w[0])/w[2]) + (-w[1]*X/w[2])
axs[0].plot(X, lin_reg, label='Linear Regression', color='purple')
plt.show()
| kromer-creator/Machine-Learning-Projects | Linear Regression, PLA, and Pocket Algorithm/Lab3_Code.py | Lab3_Code.py | py | 2,736 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "numpy.random.uniform",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy... |
15952571818 | import socket
from time import sleep
# Keyence Scanner ASCII Commands ------------------------------------------------------
"""
Error Format: ER,COMMAND,ERROR_CODE
Error Codes:
------------
00 Undefined command received
01 Mismatched command format (Invalid number of parameters)
02 The parameter 1 value exceeds the set value
03 The parameter 2 value exceeds the set value
04 Parameter 2 is not set in HEX (hexadecimal) code
05 Parameter 2 set in HEX (hexadecimal) code but exceeds the set value
10 There are 2 or more ! marks in the preset data, Preset data is incorrect
11 Area specification data is incorrect
12 Specified file does not exist
13 "mm" for the %Tmm-LON,bb command exceeds the setting range.
14 Communication cannot be checked with the %Tmm-KEYENCE command.
20 This command is not executable in the current status (execution error)
21 The buffer has overflowed, so commands cannot be executed
22 An error occurred while loading or saving parameters, so commands cannot be executed
23 Commands sent from RS-232C cannot be received because AutoID Network Navigator is being connected.
99 SR-1000 Series may be faulty. Please contact your nearest KEYENCE sales office
"""
# Read Serial Number
TRIGGER_INPUT_ON = b'LON\r'
TRIGGER_INPUT_OFF = b'LOFF\r'
REQUEST_SCANNER_VERSION = b'KEYENCE\r'
# Response: OK,FTUNE
REQUEST_FOCUS_ADJUSTMENT = b'FTUNE\r'
# Set the timing mode to "One-shot trigger" (one-shot signal trigger)
SET_ONESHOT_TRIGGER = b'WP,101,1\r'
# Check if one shot trigger is set
IS_ONESHOT_TRIGGER = b'RP,101\r'
SET_TIME_APPENDING = b'WP,300,1\r'
IS_TIME_APPENDING = b'RP,300,1\r'
""" Response:
OK,NUM,a,b,c,d,e
a: OK count
b: NG count
c: ERROR count
d: STABLE count
e: Trigger input count (0 to 65535)
"""
READING_HISTORY = b'NUM\r'
""" Response:
OK,ERRSTAT,m
m = None: No error
system: System error
update: Update error
cfg: Set value error
ip: IP address duplication
over: Buffer overflow
plc: PLC link error
profinet: PROFINET error
lua: Script error
hostconnect: Host connection erro
"""
GET_ERROR_STATUS = b'ERRSTAT\r'
# -----------------------------------------------------------------------------
SOCKET_TIMEOUT = 10
"""
Class for Communication with an SR1000 Keyence Scanner
"""
class Scanner:
    """TCP client for a Keyence SR-1000 code scanner speaking the ASCII
    command protocol (commands defined above)."""
    def __init__(self, ip_addr, port, logger=None):
        self.ip = ip_addr
        self.port = port
        self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.conn.settimeout(SOCKET_TIMEOUT)
        self.connected = False
        self.log = logger  # NOTE(review): stored but never used below
        self.connect()
    # Initiate a connection to the SR1000
    def connect(self):
        try:
            self.conn.connect((self.ip, self.port)) # connect to the server
            sleep(.1)
            self.connected = True
            print(f"Connected to {self.ip}:{self.port}")
        except socket.error as er:
            # Connection failures are reported but not raised.
            print(f"Connection Failed: {er}")
    def tx(self, message):
        self.conn.sendall(message) # send raw command bytes
    def rx(self):
        data = self.conn.recv(1024).decode() # receive and decode response
        return data
    # Send a command and return the scanner's decoded response.
    def req(self, message):
        self.tx(message) # send TCP message
        sleep(.1)
        data = self.rx() # receive TCP message
        return data
        # self.conn.close() # close the connection
    # Request Status (not implemented yet)
    def get_status(self):
        # return self.req(REQUEST_STATUS)
        pass
    # Trigger a read and return the scanned code response.
    def read_code(self):
        return str(self.req(TRIGGER_INPUT_ON))
    def error_type(self):
        # Strip the response prefix, leaving only the error identifier.
        return self.req(GET_ERROR_STATUS).replace('OK,ERRSTAT,', '')
if __name__ == '__main__':
    # Read scanner IP/port from a .env file and poll codes until Ctrl-C.
    from dotenv import dotenv_values
    c = dotenv_values()
    s = Scanner(c["SR1000_SCANNER_IP"], int(c["SR1000_SCANNER_PORT"]))
    try:
        while True:
            # Trigger a scan and print whatever the scanner returns.
            print(s.read_code())
            sleep(1)
    except KeyboardInterrupt:
        print("Done")
| brianteachman/serial_printer_controller | sr1000.py | sr1000.py | py | 4,206 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "socket.socket",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "time.sleep"... |
27885172320 | import tweepy
import time
#creds
#creds
# SECURITY: these API keys and access tokens are hard-coded and committed to
# source control — they should be revoked and loaded from environment
# variables or a config file instead of being embedded here.
auth = tweepy.OAuthHandler('F65nYoxP5rSF2GCvhYmyGTgf9' , 'ur8QEgt7J2ugicpmSCLauprXMNOVOzIfAQMOAXnbhflvb2aH1W')
auth.set_access_token('1309177113652662272-4rrOUk9Pxn9KccGPnATO2IUypgT8tr', '1K3NjvrLHuMYeR4xfpp2CpLENsgqrPugZ4WvTtI6ZAyO5' )
api = tweepy.API(auth)
user = api.me()
#print("user creds: ", user)
#api.update_status("Hello, this is a test with bukola")
#for following in tweepy.Cursor(api.friends).items():
#    print(following.name)
#randomly likes a tweet based on keywords
# search = 'UniversityofHouston'
# num_tweet = 2
# for tweet in tweepy.Cursor(api.search, search).items(num_tweet):
#     try:
#         print("tweet liked")
#         tweet.favorite()
#         time.sleep(1)
#     except tweepy.TweepyError as e:
#         print(e.reason)
#     except StopIteration:
#         break
# Like every non-retweet on the given user's timeline.
tweet_account = api.user_timeline(screen_name = "jay_hopee")
for tweet in tweet_account:
    if tweet.text[0:2] != "RT":
        tweet.favorite()
        print("liked my own tweet")
| prajaktaandhale/Twitter-Bot-using-Python | twitterbot1.py | twitterbot1.py | py | 1,039 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tweepy.OAuthHandler",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 8,
"usage_type": "call"
}
] |
23562269693 | from graph_tool.all import Graph,Vertex,graph_draw,radial_tree_layout
from GN0.alpha_zero.MCTS_cached import MCTS as MCTS_old,Node,Leafnode,upper_confidence_bound
from GN0.alpha_zero.MCTS import MCTS
from GN0.alpha_zero.NN_interface import NNetWrapper
from graph_game.shannon_node_switching_game import Node_switching_game
from graph_game.graph_tools_games import get_graph_only_hex_game,Hex_game
import numpy as np
from typing import Union
import os
import time
from GN0.models import get_pre_defined
from argparse import Namespace
from graph_game.graph_tools_hashing import get_unique_hash
import torch
def dummy_nn(game:Node_switching_game):
    """Stand-in for a policy/value network: returns the legal moves, a
    prior proportional to move index (1..n, normalized), and a fixed value
    of 0.7 for an even number of moves, 0.3 otherwise."""
    actions = game.get_actions()
    priors = np.arange(1, len(actions) + 1, dtype=float)
    priors /= priors.sum()
    val = 0.7 if len(actions) % 2 == 0 else 0.3
    return actions, torch.from_numpy(priors), torch.tensor([val])
def graph_state(game,prob=None):
    """Render the game graph to node_state.pdf (optionally annotating each
    legal move with its prior from *prob*) and open it in mupdf."""
    if prob is None:
        prior_map = None
    else:
        prior_map = game.view.new_vertex_property("double")
        for i,vertex in enumerate(game.get_actions()):
            prior_map[vertex] = prob[i]
    # Close any previous viewer before re-rendering and re-opening.
    os.system("pkill -f 'mupdf node_state.pdf'")
    game.draw_me(fname="node_state.pdf",vprop1=prior_map,decimal_places=3)
    os.system("nohup mupdf node_state.pdf > /dev/null 2>&1 &")
def graph_node(node:Node,prob=None):
    """Render the game state stored in an MCTS node to node_state.pdf,
    annotating each legal move with its prior (node.priors by default)."""
    if prob is None:
        prob = node.priors
    game = Node_switching_game.from_graph(node.storage)
    prior_map = game.view.new_vertex_property("double")
    for vertex in game.get_actions():
        # node.moves and prob are index-aligned; map each move's prior
        # onto the corresponding graph vertex.
        prior_map[vertex] = prob[list(node.moves).index(vertex)]
    # Close any previous viewer before re-rendering and re-opening.
    os.system("pkill -f 'mupdf node_state.pdf'")
    game.draw_me(fname="node_state.pdf",vprop1=prior_map,decimal_places=3)
    os.system("nohup mupdf node_state.pdf > /dev/null 2>&1 &")
def graph_from_hash_mcts(mcts:MCTS,root:Node_switching_game,to_show="num_visits",last_hash=int):
    """Build a graph-tool tree visualizing a hash-keyed MCTS search.

    Each tree vertex is one reachable game state; its label is chosen by
    *to_show* (visit count, Q, prior, move, vertex number, or UCB), and the
    state whose hash equals *last_hash* gets a green halo.

    NOTE(review): the default `last_hash=int` is the `int` type itself —
    looks like a placeholder; callers are expected to pass a real hash.

    :return: (g, number_to_state) — the tree and a map from tree-vertex
        index to the corresponding game-state Graph.
    """
    def recursive_add(state:Graph,cur_vertex:Vertex):
        # Re-enter the given state and stop if MCTS never expanded it.
        game.set_to_graph(Graph(state))
        s = get_unique_hash(game.view)
        if s not in mcts.Ps:
            return
        actions = game.get_actions()
        # Per-action UCB; fall back to the unvisited-node formula when no
        # Q/N statistics exist yet for this state.
        if s in mcts.Qsa:
            ucb_many = mcts.Qsa[s] + mcts.cpuct * mcts.Ps[s] * np.sqrt(mcts.Ns[s]) / (1 + mcts.Nsa[s])
        else:
            ucb_many = 0.5 + mcts.cpuct * mcts.Ps[s] * np.sqrt(mcts.Ns[s])
        nsa = mcts.Nsa[s] if s in mcts.Nsa else np.zeros_like(mcts.Ps[s])
        qsa = mcts.Qsa[s] if s in mcts.Qsa else np.ones_like(mcts.Ps[s])*0.5
        for ucb,prior,visits,q,action in zip(ucb_many,mcts.Ps[s],nsa,qsa,game.get_actions()):
            # Apply the action on a fresh copy of the parent state.
            game.set_to_graph(Graph(state))
            game.make_move(action)
            new_state = Graph(game.graph)
            new_s = get_unique_hash(game.view)
            v = g.add_vertex()
            g.add_edge(cur_vertex,v)
            # Vertex color encodes whose turn it was in the parent state.
            color[v] = "blue" if state.gp["m"] else "red"
            if to_show=="num_visits":
                text[v] = str(visits)
            elif to_show == "q":
                text[v] = f"{q:.2f}"[1:] if q<1 else "1"
            elif to_show == "prior":
                text[v] = f"{prior:.2f}"
            elif to_show == "m":
                text[v] = str(action)
            elif to_show=="numbers":
                text[v] = str(int(v))
            elif to_show=="ucb":
                text[v] = f"{float(ucb):.2f}"
            number_to_state[int(v)] = new_state
            # Highlight the state matching last_hash with a green halo.
            if last_hash==new_s:
                halo_color[v] = [0,1,0,0.5]
                weather_halo[v] = True
            else:
                weather_halo[v] = False
                halo_color[v] = [0,0,0,0]
            # Expanded states become circles and recurse; leaves are squares.
            if new_s in mcts.Ps:
                shape[v] = "circle"
                recursive_add(new_state,v)
            else:
                shape[v] = "square"
    state = Graph(root.graph)
    game = root
    number_to_state = {}
    g = Graph(directed=True)
    # Per-vertex display properties, stored on the graph for later drawing.
    color = g.new_vertex_property("string")
    shape = g.new_vertex_property("string")
    text = g.new_vertex_property("string")
    size = g.new_vertex_property("int")
    pen_width = g.new_vertex_property("int")
    halo_color = g.new_vertex_property("vector<double>")
    weather_halo = g.new_vertex_property("bool")
    halo_size = g.new_vertex_property("double")
    g.vp.l = halo_size
    g.vp.h = halo_color
    g.vp.g = size
    g.vp.c = color
    g.vp.s = shape
    g.vp.t = text
    g.vp.p = pen_width
    g.vp.b = weather_halo
    root_hash = get_unique_hash(game.view)
    number_to_state[0] = Graph(state)
    v = g.add_vertex()
    halo_color[v] = [0,0,0,0]
    if root_hash==last_hash:
        halo_color[v] = [0,1,0,0.5]
        weather_halo[v] = True
    recursive_add(state,v)
    halo_size.a = 1.1
    makerturn = state.gp["m"]
    color[v] = "red" if makerturn else "blue"
    shape[v] = "circle" if root_hash in mcts.Ps else "square"
    text[v] = ""
    size.a = 25
    return g,number_to_state
def graph_from_root(root:Union[Node,Leafnode],to_show="num_visits",last_node=None):
    """Mirror an object-based MCTS tree as a graph-tool Graph for drawing.

    Vertex properties encode the player to move (color), node kind (shape:
    Node=circle, Leafnode=square), a per-vertex label chosen via ``to_show``
    ("num_visits", "q", "value", "prior", "m", "numbers" or "ucb") and a
    green halo on ``last_node`` (the most recently visited node, if any).

    Returns:
        (g, number_to_node): the graph-tool Graph plus a mapping from vertex
        index to the corresponding Node/Leafnode.
    """
    def recursive_add(cur_node:Node,cur_vertex:Vertex):
        # Exploration term per child (c_puct = 1); only rendered when to_show=="ucb".
        ucb = upper_confidence_bound(cur_node,1)
        for tv,child,q,visits,one_ucb,prior in zip(cur_node.total_value,cur_node.children,cur_node.Q,cur_node.visits,ucb,cur_node.priors):
            v = g.add_vertex()
            g.add_edge(cur_vertex,v)
            # Color by side to move in the parent's stored position.
            color[v] = "blue" if cur_node.storage.gp["m"] else "red"
            if to_show=="num_visits":
                text[v] = str(visits)
            elif to_show == "q":
                # Drop the leading "0" of e.g. "0.37" to keep labels short.
                text[v] = f"{q:.2f}"[1:] if q<1 else "1"
            elif to_show == "value":
                if isinstance(child,Node):
                    text[v] = f"{tv:.2f}"
                else:
                    text[v] = child.value
            elif to_show == "prior":
                text[v] = f"{prior:.2f}"
            elif to_show == "m":
                if isinstance(child,Node):
                    text[v] = ""
                else:
                    text[v] = "" if child.move is None else child.move
            elif to_show=="numbers":
                text[v] = str(int(v))
            elif to_show=="ucb":
                text[v] = f"{float(one_ucb):.2f}"
            number_to_node[int(v)] = child
            if last_node is not None and last_node==child:
                # Highlight the most recently visited node with a green halo.
                halo_color[v] = [0,1,0,0.5]
                weather_halo[v] = True
            else:
                weather_halo[v] = False
                halo_color[v] = [0,0,0,0]
            if isinstance(child,Node):
                shape[v] = "circle"
                recursive_add(child,v)
            else:
                shape[v] = "square"
    number_to_node = {}
    g = Graph(directed=True)
    # Vertex property maps; aliased to one-letter names on g.vp so the
    # graph_draw call in visualize_MCTS can reference them.
    color = g.new_vertex_property("string")
    shape = g.new_vertex_property("string")
    text = g.new_vertex_property("string")
    size = g.new_vertex_property("int")
    pen_width = g.new_vertex_property("int")
    halo_color = g.new_vertex_property("vector<double>")
    weather_halo = g.new_vertex_property("bool")
    halo_size = g.new_vertex_property("double")
    g.vp.l = halo_size
    g.vp.h = halo_color
    g.vp.g = size
    g.vp.c = color
    g.vp.s = shape
    g.vp.t = text
    g.vp.p = pen_width
    g.vp.b = weather_halo
    # Root vertex.
    v = g.add_vertex()
    halo_color[v] = [0,0,0,0]
    if root==last_node:
        halo_color[v] = [0,1,0,0.5]
        weather_halo[v] = True
    if isinstance(root,Node):
        recursive_add(root,v)
    halo_size.a = 1.1
    makerturn = root.makerturn if isinstance(root,Leafnode) else root.storage.gp["m"]
    color[v] = "red" if makerturn else "blue"
    shape[v] = "square" if isinstance(root,Leafnode) else "circle"
    text[v] = ""
    size.a = 25
    return g,number_to_node
def visualize_MCTS(new_version=True):
    """Interactive console for inspecting an MCTS search tree on a 2x2 Hex board.

    Renders the tree with graph-tool into ``mcts.pdf`` (opened via mupdf on a
    bspwm desktop) and reads single-letter commands from stdin to switch the
    displayed statistic, run single MCTS iterations, or descend into the best
    child.

    Args:
        new_version: if True use the hash-table based MCTS implementation,
            otherwise the older object-tree MCTS.
    """
    print("""
Instructions:
n: Node number v: number of visits
q: Q-values t: Total Values
m: moves p: Priors
u: ucb r: get result
c: select best child for next iteration
[number]: show graph for number.
""")
    size = 2
    game = get_graph_only_hex_game(size)
    show_game = Hex_game(size)
    # nn = get_pre_defined_mcts_model("misty-firebrand-26/11")
    nnet = get_pre_defined("policy_value",args=Namespace(**{"hidden_channels":25,"num_layers":8,"head_layers":2}))
    nn = NNetWrapper(nnet=nnet)
    if new_version:
        # Hash-based MCTS; dummy_nn stands in for a trained evaluator.
        mcts = MCTS(game.copy(),dummy_nn,args=Namespace(cpuct=1),remove_dead_and_captured=False)
        last_hash = get_unique_hash(game.view)
    else:
        mcts = MCTS_old(game,nn.predict_for_mcts,remove_dead_captured=False)
        last_node = mcts.root
    mode = "num_visits"
    while 1:
        # Re-render the tree after every state-changing command.
        if new_version:
            g,number_to_state = graph_from_hash_mcts(mcts,game.copy(),to_show=mode,last_hash=last_hash)
        else:
            g,number_to_node = graph_from_root(mcts.root,to_show=mode,last_node=last_node)
        graph_draw(g,radial_tree_layout(g,g.vertex(0)),vprops={"halo_size":g.vp.l,"halo":g.vp.b,"halo_color":g.vp.h,"pen_width":g.vp.p,"shape":g.vp.s,"fill_color":g.vp.c,"text":g.vp.t,"size":g.vp.g},bg_color="black",output="mcts.pdf")
        # Restart the PDF viewer and refocus the terminal (bspwm specific).
        os.system("pkill -f 'mupdf mcts.pdf'")
        os.system("nohup mupdf mcts.pdf > /dev/null 2>&1 &")
        time.sleep(0.1)
        os.system("bspc node -f west")
        while 1:
            command = input()
            if command=="n":
                mode = "numbers"
                break
            elif command=="v":
                mode = "num_visits"
                break
            elif command=="q":
                mode = "q"
                break
            elif command=="t":
                mode = "value"
                break
            elif command=="c":
                # Descend into the most-visited child and continue from there.
                if new_version:
                    moves,probs = mcts.extract_result(Graph(game.graph),temp=0)
                else:
                    moves,probs = mcts.extract_result(0)
                action = moves[np.argmax(probs)]
                # NOTE(review): ``mcts.root`` only exists on the old MCTS;
                # this branch presumably fails with new_version=True — confirm.
                child = mcts.root.children[np.argmax(probs)]
                if isinstance(child,Leafnode):
                    print("Failed, child is leafnode")
                else:
                    mcts.next_iter_with_child(action,child.storage)
                break
            elif command=="m":
                mode = "m"
                break
            elif command=="p":
                mode = "prior"
                break
            elif command=="u":
                mode = "ucb"
                break
            elif command=="r":
                # Show the move probabilities extracted from the current tree.
                if new_version:
                    moves,probs = mcts.extract_result(Graph(game.graph),temp=0)
                    graph_state(game.copy(),probs)
                else:
                    moves,probs = mcts.extract_result(0)
                    graph_node(mcts.root,probs)
                print(moves,probs)
            elif command=="":
                # Empty input: run a single MCTS iteration and show the leaf.
                if new_version:
                    value = mcts.single_iteration(Graph(game.graph))
                    g = Node_switching_game.from_graph(mcts.leaf_graph)
                    s = get_unique_hash(g.view)
                    if s in mcts.Ps:
                        graph_state(g,mcts.Ps[s])
                    else:
                        graph_state(g)
                    last_hash = s
                else:
                    leaf,value = mcts.single_iteration()
                    if isinstance(leaf,Node):
                        graph_node(leaf)
                    if mcts.done:
                        print(mcts.extract_result(1))
                        print("MCTS is done")
                print(f"got value {value}")
                break
            else:
                # Numeric input: inspect the node/state with that vertex number.
                if new_version:
                    state = number_to_state[int(command)]
                    g = Node_switching_game.from_graph(state)
                    graph_state(g,mcts.Ps[get_unique_hash(g.view)])
                else:
                    if int(command)==0:
                        node = mcts.root
                    else:
                        node = number_to_node[int(command)]
                    if isinstance(node,Node):
                        show_game.set_to_graph(node.storage)
                        show_game.draw_me(fname="uff.pdf")
                        os.system("nohup mupdf uff.pdf > /dev/null 2>&1 &")
                        time.sleep(0.1)
                        os.system("bspc node -f west")
                    # Shallow copy of the node attributes without the tree links.
                    nd = node.__dict__.copy()
                    del nd["parent"]
                    del nd["children"]
if __name__=="__main__":
    # Launch the interactive MCTS visualizer when run as a script.
    visualize_MCTS()
| yannikkellerde/GNN_Hex | GN0/alpha_zero/visualize_MCTS.py | visualize_MCTS.py | py | 12,530 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "graph_game.shannon_node_switching_game.Node_switching_game",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 19,
"usage_type": "call"
},
{
"... |
11822802935 | import pygame
from typing import List, Callable, Tuple
# Shared RGB color palette used across the UI helpers below.
GREEN = (0, 255, 0)
RED = (255, 0, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GREY = (57, 57, 57)
LIGHT_GREY = (99, 99, 99)
LIGHT_BLUE = (141, 182, 205)
# BACKGROUND_COLOR = (158, 58, 64)
BACKGROUND_COLOR = (113, 155, 158)
SIDEBAR_IMAGE_SCALE = 1.5  # make cards and nobles 1.5x bigger
def dim_screen(screen, color=(0, 0, 0), alpha=128):
    """Overlay the whole screen with a translucent color to dim it."""
    overlay = pygame.Surface(screen.get_size())
    overlay.fill(color)
    overlay.set_alpha(alpha)
    screen.blit(overlay, (0, 0))
def get_selection_box(screen: pygame.Surface, width=0.5, height=0.5, color=BACKGROUND_COLOR):
    """Create a selection box surface centered on the screen.

    ``width`` and ``height`` are fractions of the screen dimensions.

    :return: (surface of the box, rect positioning it on screen)
    """
    left = screen.get_width() * (1 - width) / 2
    top = screen.get_height() * (1 - height) / 2
    box_w = screen.get_width() * width
    box_h = screen.get_height() * height
    box = pygame.Surface((box_w, box_h))
    box.fill(color)
    return box, pygame.Rect(left, top, box_w, box_h)
def button(text, width, height, color, border_radius=10):
    """Render a rounded-rectangle button surface with centered text.

    :param text: label drawn in the middle of the button
    :param width: button width in pixels
    :param height: button height in pixels
    :param color: fill color of the rounded rectangle
    :param border_radius: corner radius in pixels
    :return: the rendered button surface (transparent outside the rect)
    """
    surface = pygame.Surface((width, height), pygame.SRCALPHA)
    pygame.draw.rect(surface, color, (0, 0, width, height), border_radius=border_radius)
    write_on(surface, text)
    return surface
def flash_message(screen, text, color=GREEN, opacity=255):
    """Show a translucent message banner centered at the top of the screen.

    :param screen: destination surface
    :param text: message to display
    :param color: banner fill color
    :param opacity: banner alpha (0-255)
    """
    banner = pygame.Surface((screen.get_width() / 2, screen.get_height() / 10))
    banner.set_alpha(opacity)
    banner.fill(color)
    write_on(banner, text)
    screen.blit(banner, (screen.get_width() / 2 - banner.get_width() / 2, 0))
def flash_right_side(screen, text, color=GREEN, opacity=255, font_size=20):
    """Show a translucent message box near the top-right corner of the screen.

    The box width is fitted to the rendered text plus a small margin.

    :param screen: destination surface
    :param text: message to display
    :param color: box fill color
    :param opacity: box alpha (0-255)
    :param font_size: font size used both for measuring and writing the text
    """
    measure_font = pygame.font.Font(None, font_size)
    rendered = measure_font.render(text, True, color)
    banner_w = rendered.get_rect().width + 10
    banner_h = screen.get_height() / 10
    banner = pygame.Surface((banner_w, banner_h))
    banner.set_alpha(opacity)
    banner.fill(color)
    write_on(banner, text, font_size=font_size)
    screen.blit(banner, (screen.get_width() - banner.get_width() - 30, 0))
def write_on(surface, text, color=BLACK, font='Arial', font_size=20, center=None):
    """Draw ``text`` onto ``surface``, centered by default or at ``center``.

    :param surface: destination surface
    :param text: text to render
    :param color: text color
    :param font: system font name
    :param font_size: font size in points
    :param center: optional (x, y) center; defaults to the surface center
    """
    rendered = pygame.font.SysFont(font, font_size).render(text, True, color)
    rect = rendered.get_rect()
    rect.center = center if center is not None else (surface.get_width() / 2, surface.get_height() / 2)
    surface.blit(rendered, rect)
def outlined_text(surface, text, outline_color=BLACK, color=WHITE, font='Arial', font_size=20, center=None):
    """Draw ``text`` with a 1px outline.

    Stamps the text in ``outline_color`` at the four diagonal offsets around
    ``center`` (top-left, top-right, bottom-left, bottom-right), then draws
    the fill text in ``color`` on top.

    :param surface: destination surface
    :param text: text to render
    :param outline_color: color of the 1px outline
    :param color: fill color of the text
    :param font: system font name
    :param font_size: font size in points
    :param center: optional (x, y) center; defaults to the surface center
    """
    if center is None:
        center = (surface.get_width() / 2, surface.get_height() / 2)
    for dx, dy in ((-1, -1), (1, -1), (-1, 1), (1, 1)):
        write_on(surface, text, outline_color, font, font_size, (center[0] + dx, center[1] + dy))
    # TEXT FILL
    write_on(surface, text, color, font, font_size, center)
class Button:
    """A clickable rectangle with a fill color, optional label and an on-click callback."""
    def __init__(self,rectangle : pygame.Rect, on_click_event : Callable[[None], None], color: Tuple[int,int,int] = LIGHT_GREY, text: str = "") -> None:
        self.rectangle = rectangle        # screen-space hit box
        self.activation = on_click_event  # callable invoked when the button is clicked
        self.color = color
        self.text = text

    def set_text(self, text):
        """Replace the button label."""
        self.text = text

    def display(self, screen):
        """Draw the button rectangle onto ``screen`` (label drawing not handled here)."""
        pygame.draw.rect(screen,self.color,self.rectangle)
{
"api_name": "pygame.Surface",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"... |
22080278831 | import flask
import sqlite3
from flask import jsonify, Response
from config import DEBUG, DATABASE_PATH
from models import Application, ApplicationHistory
app = flask.Flask(__name__)
app.config["DEBUG"] = DEBUG  # debug flag comes from config.py
@app.route('/<app_id>', methods=['GET'])
def application(app_id):
    """Return the JSON representation of an application, or a 404 error.

    Opens a short-lived SQLite connection per request and always closes it.
    """
    connection = sqlite3.connect(DATABASE_PATH)
    try:
        found = Application.from_app_id(app_id, connection)
    finally:
        # BUG FIX: the connection was previously leaked on every request.
        connection.close()
    if found is not None:
        return jsonify(found.to_json())
    # BUG FIX: was 204 (No Content), which must not carry a body — the error
    # JSON was silently dropped. 404 delivers it and matches the semantics.
    return jsonify({
        'error': f'Application with {app_id} not found'
    }), 404
if __name__ == '__main__':
    # Only start the dev server when run directly, not when the module is
    # imported (e.g. by a WSGI server, which would otherwise start it twice).
    app.run()
| bilelmoussaoui/flathub-stats | flathub/server.py | server.py | py | 569 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "config.DEBUG",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "config.DATABASE_PATH",
"li... |
36620678228 | import openai
import os
# Azure OpenAI deployment (engine) names accepted by OpenAIChat.
engine_list = ['gpt-35-turbo', 'gpt-35-turbo-16k', 'gpt-4', 'gpt-4-32k', 'text-embedding-ada-002']
class Config:
    """Static configuration for the Azure OpenAI endpoint and helper services."""
    OPENAI_API_TYPE = "azure"
    OPENAI_API_BASE = "your_api_base"        # e.g. https://<resource>.openai.azure.com/
    OPENAI_API_VERSION = "2023-07-01-preview"
    OPENAI_API_KEY = "your_api_key"
    MAX_TURNS = 10                           # maximum number of conversation turns kept
    # NOTE: executed as a side effect at class-definition (import) time.
    os.makedirs('logs', exist_ok=True)
    SD_URL = ""                              # Stable Diffusion server base URL
    SD_T2I_API = ""                          # Stable Diffusion text-to-image API path
    serpapi_api_key = ""
    serper_api_key = ""
serper_api_key = ""
class OpenAIChat:
    """Minimal chat wrapper around the Azure OpenAI ChatCompletion API.

    Keeps the running conversation in ``self.messages`` (system prompt first)
    and appends each user/assistant turn.
    """

    def __init__(self, model_name="gpt-35-turbo"):
        """Configure the openai module from Config and validate the engine name.

        Raises:
            ValueError: if ``model_name`` is not one of ``engine_list``.
        """
        openai.api_type = Config.OPENAI_API_TYPE
        openai.api_base = Config.OPENAI_API_BASE
        openai.api_version = Config.OPENAI_API_VERSION
        openai.api_key = Config.OPENAI_API_KEY
        if model_name not in engine_list:
            raise ValueError("The model name is not in the list of available models among gpt-35-turbo, gpt-35-turbo-16k, gpt-4, gpt-4-32k, text-embedding-ada-002.")
        self.model_name = model_name
        self.messages = [{"role": "system", "content": "You are an experienced python programmer which can write codes to fulfill user's requests."}]

    def chat(self, messages):
        """Send ``messages`` (str, or list of str joined together) and return the reply.

        Raises:
            TypeError: if ``messages`` is neither a list nor a str.
        """
        if isinstance(messages, list):
            # BUG FIX: the old code did ``message += (m for m in messages)``,
            # which raises TypeError (str + generator); join the parts instead.
            message = "".join(messages)
        elif isinstance(messages, str):
            message = messages
        else:
            raise TypeError("Messages must be a list or str.")
        self.messages.append({"role": "user", "content": message})
        response = openai.ChatCompletion.create(
            engine=self.model_name,
            messages=self.messages,
            #max_tokens=2048
        )
        self.messages.append({"role": "assistant", "content": response['choices'][0]['message']['content']})
        return response['choices'][0]['message']['content']

    def set_system_prompt(self, prompt):
        """Replace the system prompt with ``prompt`` (str, or list of str joined).

        Raises:
            TypeError: if ``prompt`` is neither a list nor a str.
        """
        if isinstance(prompt, list):
            # Same generator-concatenation bug as ``chat``; join the parts.
            system_messages = "".join(prompt)
        elif isinstance(prompt, str):
            system_messages = prompt
        else:
            raise TypeError("System messages must be a list or string.")
        # BUG FIX: the old code assigned the raw ``prompt`` and discarded the
        # joined text, so a list argument would have been stored unchanged.
        self.messages[0]['content'] = system_messages

    def get_history(self):
        """Return the full message history (list of role/content dicts)."""
        return self.messages
if __name__ == "__main__":
    # Manual smoke test: send an (empty) query and print the model's reply.
    openai_chat = OpenAIChat()
    query = ""
    print(openai_chat.chat(query))
{
"api_name": "os.makedirs",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "openai.api_type",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "openai.api_base",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "openai.api_vers... |
40218112068 | from datetime import datetime
from flask import render_template, url_for, redirect, flash, request, Blueprint
from flask_login import current_user, login_required
from website import db, settings
from website.hours.forms import LogHoursForm
from website.models import User, Event, Hours
from website.events.utils import week_n
# Flask blueprint grouping all hour-logging/approval routes.
hours = Blueprint('hours', __name__)
@hours.route('/log_hours/<int:event_id>', methods=['GET', 'POST'])
@login_required
def log_hours(event_id):
    """Let the current user log hours for an event.

    GET pre-fills the form with the event's scheduled times; POST replaces
    any previously logged (unapproved) hours for this user/event pair.
    """
    event = Event.query.get_or_404(event_id)
    previously_existing = Hours.query.filter_by(user_id=current_user.id, event_id=event.id).first()
    form = LogHoursForm(event.start_date, event.end_date)
    form.event = event
    if form.validate_on_submit():
        # BUG FIX: the start/end locals were swapped (``start_datetime`` held
        # the end timestamp and vice versa); the duration value is unchanged.
        start_datetime = datetime.combine(form.start_date.data, form.start_time.data)
        end_datetime = datetime.combine(form.end_date.data, form.end_time.data)
        duration_hrs = (end_datetime - start_datetime).total_seconds() / 3600
        logged_hours = Hours(duration=duration_hrs, approved=False, user_id=current_user.id, event_id=event.id)
        if previously_existing:
            db.session.delete(previously_existing)
        db.session.add(logged_hours)
        db.session.commit()
        flash(f"{duration_hrs} hours logged for {event.title}, {event.category}.", 'success')
        return redirect(url_for('events.weekly_events', weeks_diff=week_n(datetime.fromtimestamp(event.start_date))))
    elif request.method == 'GET':
        # Pre-fill with the event's scheduled window (stored as Unix timestamps).
        form.start_date.data = datetime.fromtimestamp(event.start_date).date()
        form.end_date.data = datetime.fromtimestamp(event.end_date).date()
        form.start_time.data = datetime.fromtimestamp(event.start_date).time()
        form.end_time.data = datetime.fromtimestamp(event.end_date).time()
    return render_template('log_hours.html', title="Log Hours", event=event, form=form,
                           previously_existing=bool(previously_existing))
@hours.route('/manage_hours')
@login_required
def manage_hours():
    """Admin-only page listing students, mentors and pending (unapproved) hours."""
    if not current_user.admin:
        return redirect(request.referrer)
    hours_types = settings['event_categories']
    students = User.query.filter(User.position.in_(settings['student_types'])).all()
    mentors = User.query.filter(User.position.in_(settings['mentor_types'])).all()
    by_name = lambda person: (person.last_name, person.first_name)
    students.sort(key=by_name)
    mentors.sort(key=by_name)
    hours_list = Hours.query.filter_by(approved=False).all()
    return render_template('manage_hours.html', title="Manage Hours", students=students, mentors=mentors,
                           hours_list=hours_list, hours_types=hours_types)
@hours.route('/approve_hours/<int:hours_id>')
@login_required
def approve_hours(hours_id):
    """Mark a pending hours entry as approved (admin only)."""
    if not current_user.admin:
        return redirect(request.referrer)
    entry = Hours.query.get_or_404(hours_id)
    entry.approved = True
    db.session.commit()
    flash(f"{entry.duration} hours approved for {entry.user.first_name} "
          f"{entry.user.last_name}.", 'success')
    return redirect(url_for('hours.manage_hours'))
@hours.route('/delete_hours/<int:hours_id>')
@login_required
def delete_hours(hours_id):
    """Reject a pending hours entry by deleting it (admin only)."""
    if not current_user.admin:
        return redirect(request.referrer)
    entry = Hours.query.get_or_404(hours_id)
    db.session.delete(entry)
    db.session.commit()
    flash(f"Hours not approved for {entry.user.first_name} {entry.user.last_name}.", 'success')
    return redirect(url_for('hours.manage_hours'))
| kastuparu/FRC190Website | website/hours/routes.py | routes.py | py | 3,670 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "website.models.Event.query.get_or_404",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "website.models.Event.query",
"line_number": 18,
"usage_type": "attribute"
},
{
... |
24366566245 | import datetime
import boto3
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.template import loader
from django.views.decorators.clickjacking import xframe_options_exempt
from phishing.forms import CreateTemplate
from phishing.models import Submission, MTurkUser, Rating
# import the logging library
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
create_hits_in_live = False
environments = {
"live": {
"endpoint": "https://mturk-requester.us-east-1.amazonaws.com",
"preview": "https://www.mturk.com/mturk/preview",
"manage": "https://requester.mturk.com/mturk/manageHITs",
"reward": "0.00"
},
"sandbox": {
"endpoint": "https://mturk-requester-sandbox.us-east-1.amazonaws.com",
"preview": "https://workersandbox.mturk.com/mturk/preview",
"manage": "https://requestersandbox.mturk.com/mturk/manageHITs",
"reward": "0.11"
},
}
mturk_environment = environments["live"] if create_hits_in_live else environments["sandbox"]
session = boto3.Session()
client = session.client(
service_name='mturk',
region_name='us-east-1',
endpoint_url=mturk_environment['endpoint'],
)
def index(request):
    """Render the landing page with a placeholder context."""
    context = {
        'testing': 'hi there',
    }
    template = loader.get_template('phishing/index.html')
    return HttpResponse(template.render(context, request))
@xframe_options_exempt
def submit(request):
    """Render the phishing-message composition task page.

    Passes the scenario directions (HTML) plus the MTurk worker/assignment
    query parameters through to the template.
    """
    template = loader.get_template('phishing/submit.html')
    context = {
        # BUG FIX: corrected the user-facing typo "Supervisior" -> "Supervisor".
        'directions': """
Your target is Roger Johnson, a student employee at the University of Oak Creek.
Your message must be written as if sent by Thomas Smith, an HR employee at the University.
Your goal is to convince Roger to send ``Thomas'' his bank account information.
The personal information you may include about Roger includes the following:
<dl>
<dt>Name</dt> <dd>Roger Johnson</dd>
<dt>Date of Birth</dt> <dd>April 18, 1995</dd>
<dt>Supervisor</dt> <dd>Alice Davis</dd>
<dt>Position</dt> <dd>Math Grader</dd>
</dl>
""",
        'worker_id': request.GET.get('workerId', ''),
        'assignment_id': request.GET.get('assignmentId', ''),
        'turk_submit_to': request.GET.get('turkSubmitTo', '')
    }
    return HttpResponse(template.render(context, request))
def submit_template(request):
    """Validate and persist a submitted phishing-message template.

    Returns JSON ``{'result': bool}`` indicating whether the form validated.
    """
    form = CreateTemplate({
        'worker_id': request.POST.get('worker_id', ''),
        'assignment_id': request.POST.get('assignmentId', ''),
        'subject': request.POST.get('message_subject', ''),
        'message_template': request.POST.get('message_template', '')
    })
    if not form.is_valid():
        return JsonResponse({'result': False})
    form.execute()
    return JsonResponse({'result': True})
@xframe_options_exempt
def review(request):
    """Render the review page listing all submissions for a task.

    Each submission is presented as an email from a fixed placeholder sender;
    worker/assignment identifiers from the query string are forwarded to the
    template.
    """
    template = loader.get_template('phishing/review.html')
    task_id = request.GET.get('task', '')
    objs = Submission.objects.filter(task=task_id).all()
    context = {
        'name': 'John Doe',
        'worker_id': request.GET.get('workerId', ''),
        'assignment_id': request.GET.get('assignmentId', ''),
        'turk_submit_to': request.GET.get('turkSubmitTo', ''),
        'task': request.GET.get('task', ''),
        # One displayable "email" per submission.
        'messages': list(map(lambda x: {
            'from': 'John Doe <john@example.com>',
            'subject': x.subject,
            'body': x.text
        }, objs))
    }
    return HttpResponse(template.render(context, request))
def submit_review(request):
    """Record one worker's spam/not-spam ratings for a task's submissions.

    Creates the rating MTurkUser on first sight, stores one Rating per
    submission (paired positionally with the comma-separated ``results``),
    and once an unpaid submission has collected 10 ratings approves its
    assignment and sends a bonus when at most one reviewer called it spam.
    """
    logger.warning(request.POST)
    worker_id = request.POST.get('worker_id', '')
    task_id = request.POST.get('task', '')
    assignment_id = request.POST.get('assignmentId', '')
    results = request.POST.get('results', '').split(",")
    try:
        mt_usr = MTurkUser.objects.get(pk=worker_id)
    except MTurkUser.DoesNotExist:
        mt_usr = MTurkUser(workerId=worker_id)
        mt_usr.save()
    submissions = Submission.objects.filter(task=task_id).all()
    for (submission, result) in zip(submissions, results):
        rating = Rating(
            creator=mt_usr,
            assignmentId=assignment_id,
            when_submitted=datetime.datetime.now(),
            submission=submission,
            is_spam=result == 'spam',
            is_email=True,
            is_comprehensible=True,
            is_correct_info=True,
        )
        rating.save()
        if not submission.payout:
            # NOTE(review): this filters by the *reviewer's* assignment id;
            # presumably ratings should be gathered per submission — confirm.
            all_ratings = Rating.objects.filter(assignmentId=assignment_id).all()
            if len(all_ratings) < 10:
                continue
            client.approve_assignment(
                AssignmentId=submission.assignmentId
            )
            # BUG FIX: Python 3 ``map`` objects have no ``.count``; the old
            # ``map(...).count(True)`` raised AttributeError here.
            spam_count = sum(1 for r in all_ratings if r.is_spam)
            if spam_count <= 1:
                client.send_bonus(
                    WorkerId=submission.creator.workerId,
                    AssignmentId=submission.assignmentId,
                    BonusAmount=0.5,  # TODO set the bonus amount
                    Reason="Your submission was almost universally seen as not spam"
                )
            submission.payout = True
            submission.save()
    return JsonResponse({'result': True})
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "boto3.Session",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.template.loader.get_template",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "d... |
16239766894 | import warnings
from torch.utils.data.dataloader import DataLoader
from dataloaders.datasets.cityscapes import CityscapesSegmentation
from config_utils.search_args import obtain_search_args
from utils.loss import SegmentationLosses
import torch
import numpy as np
from auto_deeplab import AutoDeeplab
# Gradient smoke test: search a 19-class, 12-layer AutoDeeplab on GPU and
# inspect the gradient flowing into the architecture parameters (betas).
model = AutoDeeplab(19, 12).cuda()
args = obtain_search_args()
args.cuda = True
criterion = SegmentationLosses(weight=None, cuda=args.cuda).build_loss(mode=args.loss_type)

def save_grad(name):
    # NOTE(review): dead code — shadowed by the identical definition below.
    def hook(grad):
        grads[name] = grad
    return hook

args.crop_size = 64
dataset = CityscapesSegmentation(args, r'E:\BaiduNetdiskDownload\cityscapes', 'train')
loader = DataLoader(dataset, batch_size=2, shuffle=True)
grads = {}  # hook name -> captured gradient tensor

def save_grad(name):
    """Return a backward hook that stores the incoming gradient under ``name``."""
    def hook(grad):
        grads[name] = grad
    return hook

for i, sample in enumerate(loader):
    image, label = sample['image'].cuda(), sample['label'].cuda()
    # from thop import profile
    # params, flops = profile(model, inputs=(image, ))
    # print(params)
    # print(flops)
    # Capture d(loss)/d(betas) via a backward hook on the architecture weights.
    model.betas.register_hook(save_grad('y'))
    prediction = model(image)
    # y = 1e-3*torch.randn(12, 4, 3).cuda()
    # criterion = torch.nn.MSELoss()
    # z = criterion(prediction, label)
    z = prediction.mean()
    z.backward()
    print(grads['y'])
    print(grads['y'].shape)
    # print(grads['y1'])
    # print(grads['y1'].shape)
    # Only the first batch is needed for the smoke test.
    if i == 0:
        exit()
# inspect the gradient values of y
| NoamRosenberg/autodeeplab | test.py | test.py | py | 1,467 | python | en | code | 306 | github-code | 1 | [
{
"api_name": "auto_deeplab.AutoDeeplab",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "config_utils.search_args.obtain_search_args",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "utils.loss.SegmentationLosses",
"line_number": 16,
"usage_type": "ca... |
17659817974 | import json
import sys
import numpy
import os
# Suppress TensorFlow C++ INFO/WARNING/ERROR log spam (must be set before import).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras.optimizer_v2.adam import Adam
import cv2
import tensorflow
import warnings
from keras.losses import categorical_crossentropy
from keras.models import model_from_json
from faceRecognition import testFaceRecognition2
from utils import *
# numpy.set_printoptions(threshold=sys.maxsize)
# inputList, outputList = getData('sessions/**/*.jpg')
# trainingInputSet, trainingOutputSet, validationInputSet, validationOutputSet = divideData(inputList, outputList)
#
# tensorflow.compat.v1.disable_eager_execution()
# json_file = open('model_v1.json', 'r')
# loaded_model_json = json_file.read()
# json_file.close()
# loaded_model = model_from_json(loaded_model_json)
# loaded_model.load_weights("model_v1.h5")
#
# dataV = []
# labelsV = []
# facecasc = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
#
# for i in range(len(validationInputSet)):
# imagePath = validationInputSet[i]
# image = cv2.imread(imagePath)
# image = cv2.resize(image, (64, 64))
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# roi_gray = adjusted_detect_face(gray,(64,64))
# labelsV.append(validationOutputSet[i])
# dataV.append(roi_gray)
#
# lenDataV = len(dataV)
# dataV = np.reshape(dataV, (lenDataV, 64, 64, 1))
# loaded_model.compile(loss=categorical_crossentropy,
# optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7),
# metrics=['accuracy'])
# # y_predicted = loaded_model.predict(np.array(dataV))
# # outputNames = ["happy","sad","angry","disgust","neutral","surprisa","fearful"]
# # acc, prec, recall, cm = evalMultiClass(np.array(dataV), y_predicted, outputNames)
# # plotConfusionMatrix(cm, outputNames, "Emotion classification")
# #
# # print('acc: ', acc)
# # print('precision: ', prec)
# # print('recall: ', recall)
#
# test_eval = loaded_model.evaluate(np.array(dataV), np.array(labelsV), verbose=1)
# print('Test loss:', test_eval[0])
# print('Test accuracy:', test_eval[1])
# testFaceRecognition(trainingInputSet, trainingOutputSet, validationInputSet, validationOutputSet)
from utils import getDataTesting
def testEmotionDetection(imagePath):
    """Classify the facial emotion in the image at ``imagePath``.

    Loads the pre-trained Keras model from disk (hard-coded absolute paths),
    detects and normalizes the face to 64x64 via ``adjusted_detect_face``,
    and returns ``{"emotion": <name>}`` where the name is an ``Emotions``
    enum member.
    """
    # tensorflow.compat.v1.disable_eager_execution()
    json_file = open('E:\\Informatique\\University\\Anul3\\MIRPR\\mirpr-emotional\\efficient_model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights("E:\\Informatique\\University\\Anul3\\MIRPR\\mirpr-emotional\\efficient_model.h5")
    # Compile is only needed to make the model callable; no training happens here.
    loaded_model.compile(optimizer=Adam(learning_rate=1e-3),
                         loss='categorical_crossentropy',
                         metrics=['accuracy'])
    # img = cv2.imread("E:\\Informatique\\University\\Anul3\\MIRPR\\mirpr-emotional\\test_sad_dragos.jpg")
    img = cv2.imread(imagePath)
    img = adjusted_detect_face(img, (64, 64), "emotion")
    prediction = loaded_model.predict(img)
    # assumes predict returns a (1, num_classes) probability row — TODO confirm
    classes = np.argmax(prediction, axis=1)
    result = Emotions(classes[0]).name
    return {"emotion": result}
class MainWrapper:
    """Entry points called by the external backend for face/emotion recognition.

    Each ``run*`` method takes a JSON string shaped
    ``{"modelInput": {"imagePath": ...}}`` and returns a JSON string with the
    recognition result.
    """
    def __init__(self):
        numpy.set_printoptions(threshold=sys.maxsize)
        # Pre-load the reference image paths/labels used by the face recognizer.
        self.inputList, self.outputList = getDataTesting("E:\\Informatique\\University\\Anul3\\MIRPR\\mirpr-emotional\\poze\\*.jpg")

    def runFacialRecognition(self, jsonArguments):
        """Run face recognition on the image named in ``jsonArguments`` (JSON str)."""
        # Debug trace of the raw arguments (left over from integration debugging).
        with(open("E:\\Informatique\\University\\Anul3\\MIRPR\\mirpr-emotional\\test-face.txt", 'w')) as f:
            f.write("ceaw din face hehe")
            f.write(jsonArguments)
        args = json.loads(jsonArguments)
        testImagePath = args['modelInput']['imagePath']
        return json.dumps(testFaceRecognition2(self.inputList, self.outputList, testImagePath))

    def runEmotionDetection(self, jsonArguments):
        """Run emotion detection on the image named in ``jsonArguments`` (JSON str)."""
        # Debug trace of the raw arguments (left over from integration debugging).
        with(open("E:\\Informatique\\University\\Anul3\\MIRPR\\mirpr-emotional\\test-emo.txt", 'w')) as f:
            f.write("ceaw din emo hehe")
            f.write(jsonArguments)
        args = json.loads(jsonArguments)
        testImagePath = args['modelInput']['imagePath']
        return json.dumps(testEmotionDetection(testImagePath))
if (__name__ == "__main__"):
    # Manual smoke test: run emotion detection on a sample image.
    main = MainWrapper()
    # print(main.runFacialRecognition("{\"modelInput\": {\"imagePath\": \"test_happy_dragos.jpg\"}}"))
    # args = {
    #     "modelInput": {
    #         "imagePath": "E:\\Informatique\\University\\Anul3\\MIRPR\\mirpr-emotional\\emotional-backend\\emotional-backend\\temp_emotion_recog_images\\test.jpg"
    #     }
    # }
    print(main.runEmotionDetection("{\"modelInput\": {\"imagePath\": \"test_sad_dragos.jpg\"}}"))
    # print(main.runEmotionDetection(json.dumps(args)))
| qqrtos/kids-learning-platform | mirpr-emotional/main.py | main.py | py | 4,764 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "keras.models.model_from_json",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "keras.optimizer_v2.adam.Adam",
"line_number": 73,
"usage_type": "call"
},
{
"api_name... |
9264868588 |
# import libraries
import argparse
import os
import json
import datetime
import boto3
import sagemaker
from sagemaker.tensorflow.estimator import TensorFlow
from sagemaker.tensorflow.model import TensorFlowModel
from env import *
def train():
    """Train the model in SageMaker.

    Reads hyperparameters from HYPERPARAMETERS_FILE_PATH, fits a TensorFlow
    estimator on the train/test channels, persists the training job name to
    FILENAME_TRAINING_JOB_NAME (consumed later by ``deploy``) and returns the
    estimator. All UPPER_CASE names come from ``env``.
    """
    with open(HYPERPARAMETERS_FILE_PATH) as f:
        hyperparameters = json.load(f)

    # define the estimator
    print("Build Estimator...")
    estimator = TensorFlow(entry_point=ENTRY_POINT_TRAIN,
                           source_dir=SOURCE_DIR,
                           output_path=MODEL_ARTIFACTS_S3_LOCATION,
                           code_location=CUSTOM_CODE_TRAIN_UPLOAD_S3_LOCATION,
                           base_job_name=BASE_JOB_NAME,
                           role=ROLE_ARN,
                           py_version=PYTHON_VERSION,
                           framework_version=FRAMEWORK_VERSION,
                           hyperparameters=hyperparameters,
                           instance_count=TRAIN_INSTANCE_COUNT,
                           instance_type=TRAIN_INSTANCE_TYPE,
                           distributions=DISTRIBUTIONS)

    # train the model
    print("Fit the estimator...")
    estimator.fit({"train": TRAIN_DATA_S3_LOCATION, "test": TEST_DATA_S3_LOCATION})

    # Persist the job name so a later `deploy` run can locate the artifacts.
    print("Store the training job name...")
    with open(FILENAME_TRAINING_JOB_NAME, "w+") as f:
        f.write(str(estimator.latest_training_job.name))

    return estimator
def deploy():
    """Deploy the latest trained model to a SageMaker endpoint.

    Reads the training job name written by ``train``, wraps its model
    artifacts in a TensorFlowModel, and deploys it behind ENDPOINT_NAME.
    Returns the predictor. All UPPER_CASE names come from ``env``.
    """
    print("Get the latest training job name...")
    with open(FILENAME_TRAINING_JOB_NAME) as f:
        training_job_name = f.read()
    print("Training job name :", training_job_name)

    print("Build the Model...")
    model = TensorFlowModel(
        entry_point=ENTRY_POINT_INFERENCE,
        source_dir=SOURCE_DIR,
        framework_version=FRAMEWORK_VERSION,
        # Model artifacts live under the training job's output prefix.
        model_data=f"{MODEL_ARTIFACTS_S3_LOCATION}/{training_job_name}/output/model.tar.gz",
        code_location=CUSTOM_CODE_SERVING_UPLOAD_S3_LOCATION,
        name=MODEL_NAME,
        role=ROLE_ARN,
        sagemaker_session=SESS)

    print("Build an endpoint...")
    predictor = model.deploy(endpoint_name=ENDPOINT_NAME,
                             initial_instance_count=DEPLOY_INSTANCE_COUNT,
                             instance_type=DEPLOY_INSTANCE_TYPE)

    return predictor
def main(args):
    """Dispatch to training or deployment based on ``args.mode``.

    Raises:
        RuntimeError: if ``args.mode`` is neither "train" nor "deploy".
    """
    mode = args.mode
    if mode == "train":
        train()
    elif mode == "deploy":
        deploy()
    else:
        raise RuntimeError(f"{mode} is not recognized.")
if __name__ == "__main__":
    # CLI: --mode {train,deploy}
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--mode",
        type=str,
        required=True,
        choices=["train", "deploy"]
    )
    args = parser.parse_args()
    main(args)
{
"api_name": "json.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sagemaker.tensorflow.estimator.TensorFlow",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sagemaker.tensorflow.model.TensorFlowModel",
"line_number": 58,
"usage_type": "call"
... |
23981818316 | #!/usr/bin/env python
import pydart
import logging
from simulation import Simulation
from window import Window
import utils
import sys
# from logging_tree import printout # pip install logging_tree
# printout()
root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.handlers = []
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
logfmt = '[%(levelname)s][%(asctime)s][%(module)s:%(lineno)d] %(message)s'
formatter = logging.Formatter(logfmt)
ch.setFormatter(formatter)
root.addHandler(ch)
# # Configure a logger
# logfmt = '[%(levelname)s][%(asctime)s][%(module)s:%(lineno)d] %(message)s'
# logging.basicConfig(level=logging.DEBUG,
# format=logfmt,
# datefmt='%m/%d/%Y %I:%M:%S %p')
# printout()
# logging.error('test1-2-3')
# exit(0)
# Get a logger for this file
logger = logging.getLogger(__name__)
logger.info('Green stair project')
# Register jsonpickle numpy handler
utils.jsonpickle_numpy.register_handlers()
step_activation = None
prefix = ''
postfix = ''
if len(sys.argv) > 1:
index = 1
try:
step_activation = float(sys.argv[index])
index += 1
except ValueError:
logger.info('The first argument is not float: %s' % sys.argv[index])
for i, arg_i in enumerate(sys.argv[index:]):
if i == 0:
prefix = arg_i
elif i == 1:
postfix = arg_i
sim = Simulation(step_activation)
sim.prefix = prefix
sim.postfix = postfix
logger.info('prefix/postfix = %s/%s' % (sim.prefix, sim.postfix))
# Run the application
pydart.qtgui.run(title=sim.title(), simulation=sim, cls=Window)
| sehoonha/green_stair | main.py | main.py | py | 1,622 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.stdout... |
25485702994 | #!/usr/bin/env python3
from mpd import MPDClient
from requests import post
#from secrets import secrets # Get personal passwords
# NOTE: the 'secrets' module didn't exist at the time
from time import sleep, strftime
# Endpoint the "now playing" updates are POSTed to.
POST_URL = 'http://blog.pineman.sexy/control'
# Local MPD unix socket path.
MPD_SOCKET = '/home/pineman/.config/mpd/socket'
client = MPDClient()
client.connect(MPD_SOCKET, '')
# Last reported track; module-level state mutated by get_mpd_data().
data = {'song': ''}
def get_mpd_data():
idle = client.idle()
if idle[0] == 'player':
state = client.status()['state']
wanted = ['play']
if state in wanted:
info = client.currentsong()
try:
artist = info['artist']
title = info['title']
song = '{0} - {1}'.format(artist, title)
except KeyError:
filename = info['file']
song = filename.split('/')[-1][:-4]
finally:
global data
if data['song'] != song:
data = {'date': strftime('%X %Z %a, %d %b'), 'song': song}
return data
else:
return None
else:
return None
def post_mpd_data(data):
"""POST the data as post data, not url-encoded params"""
try:
print(data)
user = secrets[0]
password = secrets[1]
request = post(POST_URL, data=data, auth=(user, password))
print('success')
except Exception as error:
print('error:\n{0}'.format(error))
def main():
while True:
data = get_mpd_data()
if data:
post_mpd_data(data)
if __name__ == '__main__':
main()
| pineman/code | old_proj/stream/mpdpostd.py | mpdpostd.py | py | 1,359 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "mpd.MPDClient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 46,
"usage_type": "call"
}
] |
20210567264 | from dtcwt_scattering import DtcwtScattering2D
import numpy as np
from copy import copy
from keras import layers
from keras.models import Sequential
from keras.optimizers import RMSprop
from time import time
from statistics import mean
class DtcwtConvClassifier:
def __init__(self, m=2):
self.transform2D = DtcwtScattering2D()
self.m = m
## DEFINITION MODEL
#self.model = SVC(kernel="linear", probability=True)
self.model = Sequential()
self.model.add(layers.Conv1D(20, 5, input_shape=(127,16)))
self.model.add(layers.Conv1D(20, 3))
self.model.add(layers.Conv1D(20, 3))
self.model.add(layers.GlobalAveragePooling1D())
self.model.add(layers.Dense(10, activation='softmax'))
self.model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
def __resize_image(self, x):
current_size = len(x)
new_size = 2 ** (int(np.log2(current_size)) + 1)
return np.pad(
x,
pad_width=int((new_size - current_size) / 2),
mode="constant",
constant_values=0,
)
def __to_scat_vector(self, x):
x_c = copy(x)
x_c = self.__resize_image(x_c)
scatCoef = self.transform2D.transform(np.asarray(x_c), self.m)
scatVector = []
for c in scatCoef:
scatVector = scatVector + [c.flatten()]
return scatVector
def fit(self, X, y):
scatX = []
times = []
for i in range(len(X)):
if i%25 == 0:
print('{}/{}'.format(i, len(X)))
t = time()
scatX.append(self.__to_scat_vector(X[i]))
times.append(time()-t)
print('Mean computing time : ', mean(times))
print('Total computing time : ', sum(times))
scatX = np.array(scatX)
print('SCATX SHAPE :', scatX.shape)
history = self.model.fit(scatX, y, batch_size=100, verbose=1, epochs=100, validation_split=0.2)
return history
def predict(self, X):
scatX = []
for i in range(len(X)):
scatX.append(self.__to_scat_vector(X[i]))
return self.model.predict(scatX)
def evaluate(self, X, y):
scatX = []
for i in range(len(X)):
scatX.append(self.__to_scat_vector(X[i]))
return self.model.evaluate(scatX, y)
| COMPLEX-WAVELET/complex-wavelet | conv_classifier.py | conv_classifier.py | py | 2,384 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dtcwt_scattering.DtcwtScattering2D",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv1D",
"line_number": 18,
"usage_type": "call"
},
{
"ap... |
8887803211 | import scipy.signal as spsig
import globalVariables as gv
import numpy as np
import os
import matplotlib.pyplot as plt
from astropy.io import fits
import scipy.special as sps
import scipy.optimize as spo
from MakeMHzScale import MHzScale
import scipy.ndimage as spni
import scipy.interpolate as spi
#analyze the big fat wide focus. Also make a pretty picture of it if so desired
#------voigt fit
def voigtSpaceFit(x,x0,a,sigma,gamma,b):
v0=sps.voigt_profile(0, sigma, gamma)
v=sps.voigt_profile(x-x0, sigma, gamma)
return a*v/v0+b
def voigtImageFit(x, x0, a, sigma, gamma, b):
v0=sps.voigt_profile(0, sigma, gamma)
v=sps.voigt_profile(x-x0, sigma, gamma)
return a*v/v0+b
#Directory
path = "C:\Data\Runs\\6_23_21"
os.chdir(path)
fileName = 'run18Far'
#Opening fits file and creating array. Flip the y axis such that image is oriented correctly.
fitsFile = fits.open(fileName+'.fits')
imagesList = fitsFile[0].data
imagesArr=imagesList.astype(float)
imagesArr=np.flip(imagesArr,axis=1)
trimVal=3e3
imagesArr[imagesArr>trimVal]=trimVal
for i in range(imagesArr.shape[0]):
imagesArr[i]=spni.gaussian_filter(imagesArr[i], 0.5)
#crop the image to the focus to remove noise
xStart=0
xEnd=-1
yStart=0
yEnd=800
temp=imagesArr[:,yStart:yEnd,xStart:xEnd]
temp=np.mean(temp,axis=2)
temp=np.mean(temp,axis=1)
guess=[np.argmax(temp),np.max(temp)-np.min(temp),1,1,np.min(temp)]
x=np.arange(temp.shape[0])
params,pcov=spo.curve_fit(voigtImageFit, x, temp, p0=guess)
fwhmImage=.5346*(2*params[3])+np.sqrt(.2166*(2*params[3])**2+(params[2]*2.335)**2)
#select the left and right image to capture most of the signal
centerImage=params[0]
imageLeft=int(centerImage-2*fwhmImage)
imageRight=int(centerImage+2*fwhmImage)
plt.axvline(x=imageLeft,c='black')
plt.axvline(x=imageRight,c='black')
plt.scatter(x,temp,c='r')
plt.plot(voigtImageFit(x, *params))
plt.grid()
plt.show()
#this start and end value captures most of the beam in frequency space. It is used to make the single image that is the mean
#of a stack of iamges between imStart:imEnd
offset=0
imStart=imageLeft
imEnd=imageRight
imageMean=np.mean(imagesArr[imStart:imEnd],axis=0)
#visualize the results
binning=6
magnification=2.77
print('BINNING IS SET TO '+str(binning))
pixelSize=magnification*binning*.025 #in mm
print(pixelSize)
#-----------prepare the mean image-----------------------------------------
#Position in mm. Zero corresponds to leftmost side of the laser beam. The start of the viewing region is at 8mm
powerPosArr = np.asarray([0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85])-8 #subtract 8 to move from laser
#frame to image frame
#Measuring the power with the power meter with no small aperature over the sensor.
powerArr = np.asarray([37.2, 33.2, 32.3, 33, 34.1, 35.9, 38.2, 38.3, 37, 35, 33, 31.6, 30, 27, 26.3, 25, 23.2, 22.3])
powerArr=powerArr/np.max(powerArr) #normalize data
powerCorrectionFunc=spi.Rbf(powerPosArr, 1/powerArr, smooth=.1)
# test=np.linspace(0,85)
# plt.plot(test,powerCorrectionFunc(test))
# plt.scatter(powerPosArr,1/powerArr)
# plt.show()
def adjust_Image_To_Laser_Power(image):
#adjust each column in image by a factor which is equal to the inverse of the laser power (normalized to 1)
#at that location. The goal is to try and remove the effect of laser power. This is done by making a correction
#matrix with the same dimensions as the image and multiplyuing the image with it. A column in the correction matrix
#has the same correction factor in it up and down
powerCorrection=powerCorrectionFunc(np.arange(image.shape[1])*pixelSize)
powerCorrectionMatrix=np.tile(powerCorrection,[image.shape[0],1])
return image*powerCorrectionMatrix
def adjust_Image_To_Peak_Brightness_Each_Column(image):
peakSignalArr=np.max(image,axis=0)
print(np.argmax(image,axis=0))
peakSignalArr=peakSignalArr/peakSignalArr.max()
correctionArr=1/peakSignalArr
# correctionArr=spsig.savgol_filter(correctionArr,2*int(correctionArr.shape[0]*.25)//2+1,1) #if you want to smooth it
#. You can play with the parameters, it's rather qualitative
pixelArr=np.arange(image.shape[1])
# plt.plot(pixelArr,correctionArr)
# plt.plot(pixelArr,1/peakSignalArr)
# plt.show()
correctionMatrix=np.tile(correctionArr,[image.shape[0],1])
return image*correctionMatrix
imageBackGround=(np.mean(imagesArr[-5:],axis=0)+np.mean(imagesArr[:5],axis=0))/2 #take images from beginning and end
#to get average of background noise
imageBackGround=imageBackGround[yStart:yEnd,xStart:xEnd]
#
prettyImage=imageMean[yStart:yEnd,xStart:xEnd].copy() #crop image to only the focus to remove noise. copy to not mess
#with the original image by accident if using the whole image
prettyImage=prettyImage-imageBackGround
# prettyImage=spni.gaussian_filter(prettyImage, 1.0)
# prettyImage=adjust_Image_To_Laser_Power(prettyImage)
prettyImage=adjust_Image_To_Peak_Brightness_Each_Column(prettyImage)
cutoff=30
prettyImage[prettyImage<cutoff]=cutoff
cutoff2=180
prettyImage[prettyImage>cutoff2]=cutoff2
plt.imshow(prettyImage)#,cmap='gray')
plt.show()
#now go through the focus and get the transvese profile
#first define function to quantify the width
def width_Function(y):
x=np.arange(y.shape[0])*pixelSize
x0Guess=x[np.argmax(y)]
aGuess=np.max(y)-np.min(y)
sigmaGuess=1.0
gammaGuess=1.0
bGuess=x[np.argmin(y)]
guess=[x0Guess,aGuess,sigmaGuess,gammaGuess,bGuess]
eps=1e-10
bounds=[(-np.inf, eps, eps, eps, 0.0), (np.inf, np.inf, np.inf, np.inf, np.inf)]
try:
params,pcov=spo.curve_fit(voigtSpaceFit,x,y,p0=guess,bounds=bounds)
except:
print('failed to fit')
return np.nan
fwhm=.5346*(2*params[3])+np.sqrt(.2166*(2*params[3])**2+(params[2]*2.335)**2)
# print(params[2:4],fwhm)
# plt.scatter(x,y,c='r')
# plt.plot(x,voigtSpaceFit(x,*params))
# plt.show()
return fwhm,params[1]
yUpper=yStart #beginning of transverse plot in the vertical direction
yLower=yEnd #ending of transverse plot in the vertical direction
numColumns=prettyImage.shape[1]-1
print('numcolumns',numColumns)
columnSteps=6 #aggregate this many columns into one
boxSize = np.round(columnSteps*pixelSize,2)
widthList=[]
xWidth = numColumns*pixelSize
sigList=[]
if numColumns%columnSteps!=0:
print('column clumping amount should be a divisor of number of columns with no remainder')
numColumns=(numColumns//columnSteps)*columnSteps
for i in range(0,numColumns,columnSteps):
y=np.sum(prettyImage[:,i:i+columnSteps],axis=1)
width,sig=width_Function(y)
widthList.append(width)
sigList.append(sig)
totalSignal=np.mean(prettyImage)
xArr = np.linspace(0,xWidth,num=int(numColumns/columnSteps))
# zArr=np.arange(0,numColumns/columnSteps)*pixelSize
plt.scatter(xArr,sigList)
plt.xlabel('Distance from leftward edge of frame (cm)')
plt.ylabel('Signal Strength of Voigt for Each Box')
plt.title(fileName+' ,Box Size in x of %s mm' %boxSize)
plt.show()
plt.scatter(xArr,widthList)
plt.title(fileName+' ,Box Size in x of %s mm' %boxSize)
plt.xlabel('Distance from leftward edge of frame (cm)')
plt.ylabel('FWHM for each Box (mm)')
# plt.savefig(fileName)
plt.show()
| BillyTheKidPhysicist/experimentControlAndAnalysis | prettyFocusAnalzer.py | prettyFocusAnalzer.py | py | 7,213 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scipy.special.voigt_profile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "scipy.special",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "scipy.special.voigt_profile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": ... |
28646418023 | from datetime import datetime
from ..schema import BaseTransformer
class Transformer(BaseTransformer):
"""Transform Utah raw data for consolidation."""
postal_code = "UT"
fields = dict(
company="Company Name",
location="Location",
notice_date="Date of Notice",
jobs="Affected Workers",
)
date_format = ("%m/%d/%Y", "%m/%d/%y")
date_corrections = {
"03/09/2020&": datetime(2020, 3, 9),
"01/05/18/": datetime(2018, 1, 5),
"03/05/14 Updated": datetime(2014, 3, 5),
"09/31/10": datetime(2010, 9, 30),
"05/2009": datetime(2009, 5, 1),
"01/07//09": datetime(2009, 1, 7),
"08/31//2022": datetime(2022, 8, 31),
}
jobs_corrections = {
"645 Revised": 645,
}
| biglocalnews/warn-transformer | warn_transformer/transformers/ut.py | ut.py | py | 784 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "schema.BaseTransformer",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.da... |
72329281635 | #contains pretrained timm model
import urllib
import os
# import gradio as gr
import torch
import timm
import numpy as np
from PIL import Image
import torchvision.transforms as T
import torch.nn.functional as F
from matplotlib.colors import LinearSegmentedColormap
from typing import Dict
from captum.attr import visualization as viz
#imports specifically for gradcam:
import matplotlib.pyplot as plt
from captum.robust import PGD
#download timm and captum with the following commands:
# %pip install timm shap grad-cam
# %pip install git+https://github.com/pytorch/captum.git
device = torch.device("cuda:0")
transform = T.Compose([
T.Resize((224, 224)),
T.ToTensor(),
T.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
])
def get_prediction(model, image: torch.Tensor):
model = model.to(device)
img_tensor = image.to(device)
with torch.no_grad():
output = model(img_tensor)
output = F.softmax(output, dim=1)
prediction_score, pred_label_idx = torch.topk(output, 1)
pred_label_idx.squeeze_()
predicted_label = categories[pred_label_idx.item()]
return predicted_label, prediction_score.squeeze().item()
# print('Predicted:', predicted_label, '(', prediction_score.squeeze().item(), ')')
def image_show(img, pred):
npimg = inv_transform(img).squeeze().permute(1, 2, 0).detach().numpy()
plt.imshow(npimg)
plt.title("prediction: %s" % pred)
plt.show()
#actual prediction starting here:
MODEL: str = "resnet18"
model = timm.create_model(MODEL, pretrained=True)
model.eval()
url, filename = (
"https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt",
"imagenet_classes.txt",
)
urllib.request.urlretrieve(url, filename)
with open("imagenet_classes.txt", "r") as f:
categories = [s.strip() for s in f.readlines()]
transform = T.Compose([
T.Resize((224, 224)),
T.ToTensor()
])
transform_normalize = T.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
images = os.listdir("/content/drive/MyDrive/img_dir/")
for image in images:
img = Image.open('/content/drive/MyDrive/img_dir/' + image)
img_tensor = transform(img)
img_tensor = img_tensor.unsqueeze(0)
img_tensor.requires_grad = True
img_tensor = img_tensor.to(device)
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
inv_transform= T.Compose([
T.Normalize(
mean = (-1 * np.array(mean) / np.array(std)).tolist(),
std = (1 / np.array(std)).tolist()
),
])
print('original image: ')
pred, score = get_prediction(model, img_tensor)
image_show(img_tensor.cpu(), pred + " " + str(score))
#code for PGD:
print('applying PGD: ') #286 is egyptian cat
pgd = PGD(model, torch.nn.CrossEntropyLoss(reduction='none'), lower_bound=-1, upper_bound=1) # construct the PGD attacker
perturbed_image_pgd = pgd.perturb(inputs=img_tensor, radius=0.13, step_size=0.02,
step_num=7, target=torch.tensor([285]).to(device), targeted=True)
new_pred_pgd, score_pgd = get_prediction(model, perturbed_image_pgd)
image_show(perturbed_image_pgd.cpu(), new_pred_pgd + " " + str(score_pgd))
| shilpiprd/emlo7_solution | pgd.py | pgd.py | py | 3,265 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.device",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "to... |
8404410120 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Модуль формы <icCubesOLAPSrvRequestPanelProto>.
Сгенерирован проектом DEFIS по модулю формы-прототипа wxFormBuider.
"""
import wx
from . import cubes_olap_srv_request_form_proto
import ic
from ic.log import log
from ic.utils import wxfunc
# Для управления взаимодействия с контролами wxPython
# используется менеджер форм <form_manager.icFormManager>
from ic.engine import form_manager
__version__ = (0, 1, 1, 1)
OLAP_METHODS = ('aggregate', 'members', 'facts', 'fact', 'cell', 'report')
CUT_PARAMETER_HELP = u'cut - спецификация ячейки среза, например: cut=date:2004,1|category:2|entity:12345'
DRILLDOWN_PARAMETER_HELP = u'''drilldown - измерение, который нужно "сверлить". Например drilldown=date даст строки для каждого значения
следующий уровень даты измерения. Вы можете явно указать уровень для детализации в форме: dimension:level,
таких как: drilldown=date:month. Чтобы указать иерархию используйте dimension@hierarchy как в
drilldown=date@ywd для неявного уровня или drilldown=date@ywd:week явно указать уровень.'''
AGGREGATES_PARAMETER_HELP = u'''aggregates – список агрегатов для расчета, разделяется с помошью |,
например: aggergates=amount_sum|discount_avg|count'''
MEASURES_PARAMETER_HELP = u'''measures – список мер, для которых будут рассчитаны их соответствующие агрегаты.
Разделяется с помощью |, например: aggergates=proce|discount'''
PAGE_PARAMETER_HELP = u'page - номер страницы для нумерации страниц'
PAGESIZE_PARAMETER_HELP = u'pagesize - размер страницы для разбивки на страницы'
ORDER_PARAMETER_HELP = u'order - список атрибутов для сортировки'
SPLIT_PARAMETER_HELP = u'''split – разделенная ячейка, тот же синтаксис, что и у вырезки, определяет виртуальное двоичное (флаговое) измерение, которое указывает, является ли ячейка
принадлежит разделенному разрезу (true) или нет (false). Атрибут измерения называется __within_split__.
Обратитесь к бэкэнду, который вы используете для получения дополнительной информации, поддерживается ли эта функция или нет.'''
OLAP_SERVER_URL_FMT = 'cube/%s/%s'
class icCubesOLAPSrvRequestPanel(cubes_olap_srv_request_form_proto.icCubesOLAPSrvRequestPanelProto, form_manager.icFormManager):
"""
Форма .
"""
def __init__(self, *args, **kwargs):
"""
Конструктор.
"""
cubes_olap_srv_request_form_proto.icCubesOLAPSrvRequestPanelProto.__init__(self, *args, **kwargs)
# Тестируемый OLAP сервер
self._OLAP_server = None
self._help_popup_win = None
self.init()
def setOLAPServer(self, olap_server):
"""
Установить тестируемый OLAP сервер.
:param olap_server: OLAP сервер
"""
self._OLAP_server = olap_server
if self._OLAP_server:
# Настраиваем контрол выбора кубов
choices = [cube.description if cube.description else cube.name for cube in self._OLAP_server.getCubes()]
self.cube_choice.Clear()
self.cube_choice.AppendItems(choices)
if choices:
self.cube_choice.setSelection(0)
self.method_choice.setSelection(0)
self.refreshDimensionChoice(0)
def refreshDimensionChoice(self, i_cube):
"""
Обновить список измерений в зависимости от выбранного куба.
"""
cube = self._OLAP_server.getCubes()[i_cube] if i_cube >= 0 else None
if cube:
choices = [u''] + [dimension.getLabel() for dimension in cube.getDimensions()]
self.dimension_choice.Clear()
self.dimension_choice.AppendItems(choices)
if choices:
self.dimension_choice.setSelection(0)
def init(self):
"""
Инициализация панели.
"""
self.init_img()
self.init_ctrl()
def init_img(self):
"""
Инициализация изображений.
"""
pass
def init_ctrl(self):
"""
Инициализация контролов.
"""
self.method_choice.AppendItems(OLAP_METHODS)
# Выключить все параметры
self.cut_textCtrl.Enable(False)
self.drilldown_textCtrl.Enable(False)
self.aggregates_textCtrl.Enable(False)
self.measures_textCtrl.Enable(False)
self.page_textCtrl.Enable(False)
self.pagesize_textCtrl.Enable(False)
self.order_textCtrl.Enable(False)
self.split_textCtrl.Enable(False)
# def onCubeChoice(self, event):
# """
# Обработчик выбора куба.
# """
# i_cube = event.GetSelection()
# self.refreshDimensionChoice(i_cube)
#
# event.Skip()
def onAggregatesCheckBox(self, event):
"""
Обработчик включения параметра aggregate.
"""
enable = event.IsChecked()
self.aggregates_textCtrl.Enable(enable)
event.Skip()
def show_help_popup_win(self, button, info_text):
"""
Отобразить/скрыть всплывающее окно помощи.
:param button: Кнопка вызова окна помощи.
:param info_text: Текст помощи.
:return:
"""
if self._help_popup_win:
self._help_popup_win.close()
self._help_popup_win = None
else:
self._help_popup_win = wxfunc.showInfoWindow(parent=self,
ctrl=button,
info_text=info_text)
def onAggregatesHelpButtonClick(self, event):
"""
Подсказка по параметру.
"""
self.show_help_popup_win(self.aggregates_hlp_bpButton,
info_text=AGGREGATES_PARAMETER_HELP)
event.Skip()
def onCutCheckBox(self, event):
"""
Обработчик включения параметра cut.
"""
enable = event.IsChecked()
self.cut_textCtrl.Enable(enable)
event.Skip()
def onCutHelpButtonClick(self, event):
"""
Подсказка по параметру.
"""
self.show_help_popup_win(self.cut_hlp_bpButton,
info_text=CUT_PARAMETER_HELP)
event.Skip()
def onDrilldownCheckBox(self, event):
"""
Обработчик включения параметра drilldown.
"""
enable = event.IsChecked()
self.drilldown_textCtrl.Enable(enable)
event.Skip()
def onDrilldownHelpButtonClick(self, event):
"""
Подсказка по параметру.
"""
self.show_help_popup_win(self.drilldown_hlp_bpButton,
info_text=DRILLDOWN_PARAMETER_HELP)
event.Skip()
def onMeasuresCheckBox(self, event):
"""
Обработчик включения параметра measures.
"""
enable = event.IsChecked()
self.measures_textCtrl.Enable(enable)
event.Skip()
def onMeasuresHelpButtonClick(self, event):
"""
Подсказка по параметру.
"""
self.show_help_popup_win(self.measures_hlp_bpButton,
info_text=MEASURES_PARAMETER_HELP)
event.Skip()
def onOrderCheckBox(self, event):
"""
Обработчик включения параметра order.
"""
enable = event.IsChecked()
self.order_textCtrl.Enable(enable)
event.Skip()
def onOrderHelpButtonClick(self, event):
"""
Подсказка по параметру.
"""
self.show_help_popup_win(self.order_hlp_bpButton,
info_text=ORDER_PARAMETER_HELP)
event.Skip()
def onPageCheckBox(self, event):
"""
Обработчик включения параметра page.
"""
enable = event.IsChecked()
self.page_textCtrl.Enable(enable)
event.Skip()
def onPageHelpButtonClick(self, event):
"""
Подсказка по параметру.
"""
self.show_help_popup_win(self.page_hlp_bpButton,
info_text=PAGE_PARAMETER_HELP)
event.Skip()
def onPagesizeCheckBox(self, event):
"""
Обработчик включения параметра pagesize.
"""
enable = event.IsChecked()
self.pagesize_textCtrl.Enable(enable)
event.Skip()
def onPagesizeHelpButtonClick(self, event):
"""
Подсказка по параметру.
"""
self.show_help_popup_win(self.pagesize_hlp_bpButton,
info_text=PAGESIZE_PARAMETER_HELP)
event.Skip()
def onSplitCheckBox(self, event):
"""
Обработчик включения параметра split.
"""
enable = event.IsChecked()
self.split_textCtrl.Enable(enable)
event.Skip()
def onSplitHelpButtonClick(self, event):
"""
Подсказка по параметру.
"""
self.show_help_popup_win(self.split_hlp_bpButton,
info_text=SPLIT_PARAMETER_HELP)
event.Skip()
def setRequest(self, request):
"""
Установить запрос к серверу OLAP в структурном виде.
:param request: Словарь параметров запроса к OLAP серверу.
:return: True/False.
"""
if request is None:
request = dict()
if 'url' in request:
self.request_textCtrl.SetValue(request['url'])
cube_name = request.get('cube', None)
cube = None
if cube_name:
cubes = self._OLAP_server.getCubes()
cube_names = [cube.getName() for cube in cubes]
try:
i_cube = cube_names.index(cube_name)
cube = cubes[i_cube]
self.cube_choice.setSelection(i_cube)
except ValueError:
log.error(u'Куб с именем <%s> не найден среди %s' % (cube_name, str(cube_names)))
method_name = request.get('method', None)
if method_name:
try:
i_method = OLAP_METHODS.index(method_name)
except ValueError:
log.error(u'Метод <%s> не найден среди %s' % (method_name, str(OLAP_METHODS)))
i_method = 0
self.method_choice.setSelection(i_method)
dimension_name = request.get('dimension', None)
if dimension_name and cube:
dimensions = cube.getDimensions()
dimension_names = [dimension.getName() for dimension in dimensions]
try:
i_dimension = dimension_names.index(dimension_name) + 1
except ValueError:
log.error(u'Измерение <%s> не найденj среди %s' % (dimension_name, str(dimension_names)))
i_dimension = 0
self.dimension_choice.setSelection(i_dimension)
self.cut_checkBox.SetValue('cut' in request)
self.cut_textCtrl.Enable('cut' in request)
self.cut_textCtrl.SetValue(request.get('cut', u''))
self.drilldown_checkBox.SetValue('drilldown' in request)
self.drilldown_textCtrl.Enable('drilldown' in request)
self.drilldown_textCtrl.SetValue(request.get('drilldown', u''))
self.aggregates_checkBox.SetValue('aggregates' in request)
self.aggregates_textCtrl.Enable('aggregates' in request)
self.aggregates_textCtrl.SetValue(request.get('aggregates', u''))
self.measures_checkBox.SetValue('measures' in request)
self.measures_textCtrl.Enable('measures' in request)
self.measures_textCtrl.SetValue(request.get('measures', u''))
self.page_checkBox.SetValue('page' in request)
self.page_textCtrl.Enable('page' in request)
self.page_textCtrl.SetValue(request.get('page', u''))
self.pagesize_checkBox.SetValue('pagesize' in request)
self.pagesize_textCtrl.Enable('pagesize' in request)
self.pagesize_textCtrl.SetValue(request.get('pagesize', u''))
self.order_checkBox.SetValue('order' in request)
self.order_textCtrl.Enable('order' in request)
self.order_textCtrl.SetValue(request.get('order', u''))
self.split_checkBox.SetValue('split' in request)
self.split_textCtrl.Enable('split' in request)
self.split_textCtrl.SetValue(request.get('split', u''))
return True
def getRequest(self):
"""
Получить запрос к серверу OLAP в структурном виде.
:return: Словарь параметров запроса к OLAP серверу.
Словарь заполняется в соответствии с выбранными
параметрами контролов панели.
"""
request = dict()
i_cube = self.cube_choice.GetSelection()
cube = self._OLAP_server.getCubes()[i_cube] if i_cube >= 0 else None
cube_name = cube.getName() if cube else None
if cube_name:
request['cube'] = cube_name
i_func = self.method_choice.GetSelection()
method_name = OLAP_METHODS[i_func] if i_func >= 0 else None
if method_name:
request['method'] = method_name
i_dimension = self.dimension_choice.GetSelection() - 1
# log.debug(u'Выбранное измерение %d' % i_dimension)
dimension = (cube.getDimensions()[i_dimension] if cube else None) if i_dimension >= 0 else None
if dimension:
request['dimension'] = dimension.getName()
# Наполнить параметрами
if self.cut_checkBox.GetValue():
param = self.cut_textCtrl.GetValue().strip()
if param:
request['cut'] = param
if self.drilldown_checkBox.GetValue():
param = self.drilldown_textCtrl.GetValue().strip()
if param:
request['drilldown'] = param
if self.aggregates_checkBox.GetValue():
param = self.aggregates_textCtrl.GetValue().strip()
if param:
request['aggregates'] = param
if self.measures_checkBox.GetValue():
param = self.measures_textCtrl.GetValue().strip()
if param:
request['measures'] = param
if self.page_checkBox.GetValue():
param = self.page_textCtrl.GetValue().strip()
if param:
request['page'] = param
if self.pagesize_checkBox.GetValue():
param = self.pagesize_textCtrl.GetValue().strip()
if param:
request['pagesize'] = param
if self.order_checkBox.GetValue():
param = self.order_textCtrl.GetValue().strip()
if param:
request['order'] = param
if self.split_checkBox.GetValue():
param = self.split_textCtrl.GetValue().strip()
if param:
request['split'] = param
return request
def getRequestURL(self, request=None):
"""
Получить URL запроса к серверу OLAP по его структурному описанию.
:return: Словарь параметров запроса к OLAP серверу.
Если не определен, то берется из контролов.
"""
if request is None:
request = self.getRequest()
try:
full_request_url = self._OLAP_server.getRequestURL(request)
return full_request_url
except:
log.fatal(u'Ошибка получения полного запроса URL к OLAP серверу')
return u''
def show_cubes_olap_srv_request_panel(title=u''):
"""
:param title: Заголовок страницы нотебука главного окна.
"""
try:
main_win = ic.getMainWin()
panel = icCubesOLAPSrvRequestPanel(main_win)
# panel.init()
main_win.addPage(panel, title)
except:
log.fatal(u'Ошибка')
| XHermitOne/defis3 | analitic/analitic/olap/cubes/cubes_olap_srv_request_panel.py | cubes_olap_srv_request_panel.py | py | 17,669 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "ic.engine.form_manager.icFormManager",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "ic.engine.form_manager",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "ic.utils.wxfunc.showInfoWindow",
"line_number": 146,
"usage_type": "call... |
14448299607 | from .models import Teacher
from .models import Group
from .models import TeacherSubject
from .models import TypedSubject
from .models import Day
from .models import TimeTable
from .models import LessonType
import itertools
import itertools
from pulp import *
problem = LpProblem("schedule-opt", LpMaximize)
from faker import Faker
fake = Faker()
from collections import namedtuple
N_l = 20 # number of teachers
N_g = 20 # number of groups
N_s = 20 # number of courses
N_d = 12 # number of days
N_t = 8 # number of times per day
L = [fake.name() for _ in range(N_l)]
G = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,z,y,z"[0:2 * N_g].split(',')[:-1]
types = ['lection', 'lab', 'practice']
subjs = [fake.job().replace(',', '')[0:15] for _ in range(N_s)]
D = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,z,y,z"[0:2 * N_d].split(',')[:-1]
T = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,z,y,z"[0:2 * N_t].split(',')[:-1]
def get_all_sp_keys(L, G, types, subjs, D, T):
return itertools.product(L, G, types, subjs, D, T)
def get_all_gdt_keys(GG, DD, TT):
return itertools.product(GG, DD, TT)
def get_all_ldt_keys(LL, DD, TT):
return itertools.product(LL, DD, TT)
SP = LpVariable.dicts("SP", get_all_sp_keys(L, G, types, subjs, D, T), 0, 1, cat=pulp.LpInteger)
Ig = LpVariable.dicts("Ig", get_all_gdt_keys(G, D, T[:-1]), -1, 1, cat=pulp.LpInteger)
Il = LpVariable.dicts("Il", get_all_ldt_keys(L, D, T[:-1]), -1, 1, cat=pulp.LpInteger)
one = LpVariable("one", 1, 1, cat=pulp.LpInteger)
zero = LpVariable("zero", 0, 0, cat=pulp.LpInteger)
def get_x_by_gdt(g, d, t, SP):
s = lpSum([SP[key] for key in itertools.product(L, [g], types, subjs, [d], [t])])
return s
def get_x_by_ldt(l, d, t, SP):
s = lpSum([SP[key] for key in itertools.product([l], G, types, subjs, [d], [t])])
return s
def get_x_by_ldt(l, d, t, SP):
s = lpSum([SP[key] for key in itertools.product([l], G, types, subjs, [d], [t])])
return s
def sum_x_by_g(g, SP):
sum = lpSum([SP[key] for key in itertools.product(L, [g], types, subjs, D, T)])
return sum
def sum_x_by_l(l, SP):
sum = lpSum([SP[key] for key in itertools.product([l], G, types, subjs, D, T)])
return sum
alpha = 1.0
beta = 1.0
gamma = 1.0
def get_c(key):
return 1.0
#Problem
all_lectures = [alpha * get_c(key) * SP[key] for key in get_all_sp_keys(L, G, types, subjs, D, T)]
all_groups_no_gaps = [beta * Ig[key] for key in get_all_gdt_keys(G, D, T[:-1])]
all_teachers_no_gaps = [gamma * Il[key] for key in get_all_ldt_keys(L, D, T[:-1])]
all_lectures + all_groups_no_gaps + all_teachers_no_gaps
problem += lpSum(all_lectures + all_groups_no_gaps + all_teachers_no_gaps)
#Constraints
#Minimize gaps between lessons for teacher
def next_letter(symbol):
return T[(T.index(symbol) + 1) % len(T)]
for (teacher, day, time) in itertools.product(L, D, T[:-1]):
# problem += get_x_by_ldt(teacher, day, time, SP) + get_x_by_ldt(teacher, day, time + 1, SP) -1 >= Il[teacher, day, time]
problem += get_x_by_ldt(teacher, day, time, SP) + get_x_by_ldt(teacher, day, next_letter(time), SP) - Il[
teacher, day, time] >= 1
#Minimize gaps between lessons for group
for (group, day, time) in itertools.product(G, D, T[:-1]):
# problem += get_x_by_gdt(group, day, time, SP) + get_x_by_gdt(group, day, time + 1, SP) -1 >= Ig[group, day, time]
problem += get_x_by_gdt(group, day, time, SP) + get_x_by_gdt(group, day, next_letter(time), SP) - Ig[
group, day, time] >= 1
#with open('problem.txt', 'w') as f: print(problem, file=f)
#Amount of lessons for group must be no more than payload in plan
for g in G:
problem += sum_x_by_g(g, SP) <= len(D) * 3
#Amount of lessons for teacher must be no more than payload in plan
for teacher in L:
problem += sum_x_by_l(teacher, SP) <= len(D) * 3
#One lesson per time slot for one group
def sum_x_for_group_at_dt(g, d, t, SP):
s = 0
for key in itertools.product(L, [g], types, subjs, [d], [t]):
s += SP[key]
return s
for key in get_all_gdt_keys(G, D, T):
s = sum_x_for_group_at_dt(*key, SP)
problem += s <= 1
problem += s >= 0
#One lesson per time slot for one teacher
def sum_x_for_teacher_at_dt(l, d, t, SP):
s = 0
for key in itertools.product([l], G, types, subjs, [d], [t]):
s += SP[key]
return s
for key in get_all_ldt_keys(L, D, T):
s = sum_x_for_teacher_at_dt(*key, SP)
problem += s <= 1
problem += s >= 0
#No more than 3 lessons for one group per day
def sum_x_for_group_per_day(g, d, SP):
s = 0
for key in itertools.product(L, [g], types, subjs, [d], T):
s += SP[key]
return s
for (group, day) in itertools.product(G, D):
problem += sum_x_for_group_per_day(group, day, SP) <= 3
#No more than 4 lessons for one teacher per day
def sum_x_for_teacher_per_day(l, d, SP):
s = 0
for key in itertools.product([l], G, types, subjs, [d], T):
s += SP[key]
return s
for (teacher, day) in itertools.product(L, D):
problem += sum_x_for_teacher_per_day(teacher, day, SP) <= 4
#No more than 2 lessons of one subject for group per day
def sum_x_of_same_subject_for_group_per_day(g, d, subj, SP):
s = 0
for key in itertools.product(L, [g], types, [subj], [d], T):
s += SP[key]
return s
for (group, day, subj) in itertools.product(G, D, subjs):
problem += sum_x_of_same_subject_for_group_per_day(group, day, subj, SP) <= 2#Solution
#need to fix it problem.solve(solver=pulp.glpk(options=['--log', 'lessons.log']))
# problem.solve(solver=solvers.GLPK(options=['--log', 'lessons.log', '--wmps', 'lessons.mps', '--check']))
def print_solution(problem):
print("Status:", LpStatus[problem.status])
for v in problem.variables():
if (v.varValue > 0.0):
print(v.name, "=", v.varValue)
# print("Total Cost =", pulp.value(problem.objective))
print_solution(problem)
print(value(problem.objective))
#Match lessons to rooms
TheSix = namedtuple('TheSix', ['teacher', 'group', 'lesson_type', 'subject', 'day', 'time'])
def get_tuple(v):
# SP_('Shane_Gordon',_0,_'lab',_'Paramedic',_0,_'b')
x = v.name.split(',_') # ["SP_('Shane_Gordon'", '0', "'lab'", "'Paramedic'", '0', "'b')"]
start = "'"
end = "'"
return TheSix(*list(map(lambda s: s[s.find(start) + len(start):s.rfind(end)].replace('_', ' '), x)))
def extract_optimal_solution(problem):
optimals = [get_tuple(v) for v in problem.variables() if v.varValue > 0.0 and v.name.startswith('SP')]
return optimals
N_r = 26
R = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,z,y,z"[0:2 * N_r].split(',')[:-1]
#TypedRoom
TypedRoom = namedtuple('TypedRoom', ['room', 'type'])
lection_rooms_number = 3
lab_rooms_number = 3
practice_rooms_number = 3
lections_rooms = list(map(lambda room: TypedRoom(room=room, type='lection'), R[0:lection_rooms_number]))
lab_rooms = list(
map(lambda room: TypedRoom(room=room, type='lab'), R[lection_rooms_number:lection_rooms_number + lab_rooms_number]))
practice_rooms = list(map(lambda room: TypedRoom(room=room, type='practice'), R[
lection_rooms_number + lab_rooms_number: lection_rooms_number + lab_rooms_number + practice_rooms_number]))
available_rooms = lections_rooms + lab_rooms + practice_rooms
lessons = extract_optimal_solution(problem)
problem_match = LpProblem("match-to-rooms", LpMaximize)
Z = LpVariable.dicts("Z", list(
map(lambda pair: (tuple(pair[0]), tuple(pair[1])), itertools.product(lessons, available_rooms))), 0, 1,
cat=pulp.LpInteger)
#Rooms have priorities for appropriate lessons
priority = {
'lection': {'lection': 1.0},
'lection': {'practice': 0.5},
'lection': {'lab': 0.0},
'practice': {'lection': 0.0},
'practice': {'practice': 1.0},
'practice': {'lab': 0.0},
'lab': {'lection': 0.0},
'lab': {'practice': 0.5},
'lab': {'lab': 1.0},
} # from room type to lesson type
def get_c(key):
return priority[key[1].type][key[0].lesson_type]
problem_match += lpSum([get_c(key) * Z[(key[0], key[1])] for key in itertools.product(lessons, available_rooms)])
def get_z_by_lesson(lesson, Z):
s = lpSum([Z[(key[0], key[1])] for key in itertools.product([lesson], available_rooms)])
return s
def get_lessons_by_day_time(d, t, lessons):
return list(filter(lambda lesson: lesson.day == d and lesson.time == t, lessons))
def get_z_by_day_time(d, t, Z):
lessons_at_moment = get_lessons_by_day_time(d, t, lessons)
if len(lessons_at_moment) > 0:
return lpSum([Z[(key[0], key[1])] for key in itertools.product(lessons_at_moment, available_rooms)])
return None
for (d, t) in itertools.product(D, T):
contraint = get_z_by_day_time(d, t, Z)
if contraint:
problem_match += contraint <= 1
for lesson in lessons:
problem_match += get_z_by_lesson(lesson, Z) == 1
#Solution
#need to fix it with open('problem_match.txt', 'w') as f:
#need to fix it print(problem_match, file=f)
#need to fix it problem_match.solve(solver=solvers.GLPK(options=['--log', 'match.log', '--wmps', 'match.mps']))
print(value(problem_match.objective))
print_solution(problem_match)
| osboo/schedule-opt | schedule_app/schedules/optimizer.py | optimizer.py | py | 9,228 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "faker.Faker",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "itertools.product",
... |
73454293153 | from tkinter import *
from tkinter import ttk, messagebox
import statistics as stats
import MySQLdb
import numpy as np
import matplotlib.pyplot as plt
# Regular-expression patterns used by the verificacion* form validators below.
patronNomApe = '^([A-Z]\D+)$'  # a capitalized word with no digits (names / surnames)
patronCp = '\d{5}$'  # postal code: exactly 5 digits
patronTelefono = '\d{9}$'  # phone number: exactly 9 digits
patronCod_Historia = '\d'  # NOTE(review): only checks that the string STARTS with a digit
patronfecha = '^([0-2][0-9]|(3)[0-1])(\/)(((0)[0-9])|((1)[0-2]))(\/)\d{4}$'  # dd/mm/yyyy
def mostrarGrafica():
    """Render a bar chart comparing the number of patients, admissions and doctors.

    Pulls the three counts from obtenerDatos() and shows them as a single
    bar series in a blocking matplotlib window.
    """
    lista = obtenerDatos()
    n_data = 3
    values_1 = (lista[0], lista[1], lista[2])
    fig, ax = plt.subplots()
    index = np.arange(n_data)
    bar_width = 0.35
    rects1 = plt.bar(index, values_1, bar_width, color='r', label='Valores 1')
    plt.xlabel('Etiquetas')
    plt.ylabel('Valores')
    plt.title('Comparación Médicos, Pacientes e Ingresos')
    plt.xticks(index + bar_width, ('Pacientes', 'Ingresos', 'Medicos'))
    # BUG FIX: the original wrote "plt.legend" without calling it, which is a
    # no-op (it just evaluates the function object).
    plt.legend()
    plt.tight_layout()
    plt.show()
def media():
    """Compute the mean number of admissions per day and display it in 'medi'.

    Queries the distinct admission dates, counts the admissions on each date
    and sets the arithmetic mean of those counts into the read-only entry.

    BUG FIX: the original flattened the per-day counts through
    ``set().union(*lista2)``, which silently discarded duplicate counts
    (e.g. two different days with 3 admissions each counted once) and
    therefore skewed the mean.
    """
    miConexion = MySQLdb.connect(host='localhost', user='root', passwd='root', db='hospital')
    cur = miConexion.cursor()
    cur.execute("SELECT fecha_ingreso from ingresos group by fecha_ingreso")
    fechas = list(cur.fetchall())
    conteos = []
    for fecha in fechas:
        sql = ("SELECT count(fecha_ingreso) from ingresos where fecha_ingreso = %s")
        cur.execute(sql, fecha)  # fecha is a 1-tuple, usable as the param sequence
        for row in cur.fetchall():
            conteos.append(row[0])
    miConexion.close()
    medi.set(stats.mean(conteos))
def calcularPorcentaje():
    """Show the percentage of all admissions that happened on the date in 'fech'.

    BUG FIX: the original computed ``num1 * num2`` (product of the two counts)
    instead of an actual percentage; it now computes 100 * matching / total,
    guarding against an empty 'ingresos' table.
    """
    miConexion = MySQLdb.connect(host='localhost', user='root', passwd='root', db='hospital')
    fechporcen = fech.get()
    cur = miConexion.cursor()
    sql = ("SELECT fecha_ingreso from ingresos where fecha_ingreso = %s")
    cur.execute(sql, [fechporcen])
    num1 = len(cur.fetchall())  # admissions on the requested date
    cur.execute("SELECT fecha_ingreso from ingresos")
    num2 = len(cur.fetchall())  # admissions in total
    miConexion.close()
    porcentj = (num1 / num2) * 100 if num2 else 0
    porcen.set("El porcentaje es %s" % porcentj)
def obtenerDatos():
    """Return [n_pacientes, n_ingresos, n_medicos] counted from the database.

    BUG FIX: the original appended ``cursor.execute()``'s return value (the
    number of rows of the grouped result set) instead of fetching the counts;
    it only produced the right numbers by accident because the grouped column
    is the primary key. The counts are now fetched explicitly.
    """
    miConexion = MySQLdb.connect(host='localhost', user='root', passwd='root', db='hospital')
    cur = miConexion.cursor()
    lista = []
    for consulta in ("SELECT count(*) FROM paciente",
                     "SELECT count(*) FROM ingresos",
                     "SELECT count(*) FROM medicos"):
        cur.execute(consulta)
        lista.append(cur.fetchone()[0])
    print(lista)
    miConexion.close()
    return lista
def seleccion(a):
    """Event handler: copy the row selected in the patient table into the form fields."""
    try:
        print("seleccionado")
        valores = tabla.item(tabla.selection()).get('values')
        # Unpack first so a missing/short selection raises and lands in the except.
        (v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) = valores
        campos = (codpaciente, ss, nomb, ape1, dom, pob, prov, codp, tel, numi, obs)
        for campo, valor in zip(campos, (v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10)):
            campo.set(valor)
    except:
        print("sin selección")
def seleccionIngreso(a):
    """Event handler: copy the row selected in the admissions table into the form fields."""
    try:
        valores = tablaIngresos.item(tablaIngresos.selection()).get('values')
        # Unpack first so a missing/short selection raises and lands in the except.
        (v0, v1, v2, v3, v4, v5) = valores
        for campo, valor in zip((codingreso, proc, fech, numplan, numcam, observaci),
                                (v0, v1, v2, v3, v4, v5)):
            campo.set(valor)
    except:
        print("sin selección")
def seleccionMedico(a):
    """Event handler: copy the row selected in the doctors table into the form fields."""
    try:
        valores = tablaMedicos.item(tablaMedicos.selection()).get('values')
        # Unpack first so a missing/short selection raises and lands in the except.
        (v0, v1, v2, v3, v4, v5, v6) = valores
        for campo, valor in zip((codmed, nom, ape, esp, numcol, cargo, obser),
                                (v0, v1, v2, v3, v4, v5, v6)):
            campo.set(valor)
    except:
        print("sin selección")
def coincidencia(patron, parametro):
    """Return the re.Match object if *parametro* matches *patron* at its start, else None.

    BUG FIX: the module never imported ``re``, so every call raised NameError;
    the import is done locally to keep the fix self-contained.
    """
    import re
    return re.match(patron, parametro)
def verificacion(codi, seg, nombre, apellido, cp, numhis, tele):
    """Validate the patient form fields; show one error dialog per failing field.

    Returns True only when every field matches its pattern. Note that
    patronCod_Historia only requires the string to start with a digit.
    """
    correcto = True
    apellidolista = apellido.split(" ")  # each surname word is validated separately
    if (coincidencia(patronCod_Historia, codi) is None):
        correcto = False
        messagebox.showerror("Error", "El código paciente debe ser numérico")
    if (coincidencia(patronCod_Historia, seg) is None):
        correcto = False
        messagebox.showerror("Error", "La Seguridad social debe ser numérico")
    if (coincidencia(patronNomApe, nombre) is None):
        correcto = False
        messagebox.showerror("Error", "El nombre solo debe contener letras y la primera en maysuculas")
    for i in apellidolista:
        if coincidencia(patronNomApe, i) is None:
            correcto = False
            messagebox.showerror("Error", "El apellido solo debe contener letras y la primera en maysuculas")
    if (coincidencia(patronCp, cp) is None):
        correcto = False
        messagebox.showerror("Error", "El código postal debe tener al menos 5 digitos")
    if (coincidencia(patronCod_Historia, numhis) is None):
        correcto = False
        messagebox.showerror("Error", "El historia debe ser numérico")
    if (coincidencia(patronTelefono, tele) is None):
        correcto = False
        messagebox.showerror("Error", "El telefono debe tener 9 digitos")
    return correcto
def verificacionIngreso(cd, fe, nup, nuc):
    """Validate the admission form fields; show one error dialog per failing field.

    cd = admission code, fe = date (dd/mm/yyyy), nup = floor, nuc = bed.
    Returns True only when every field matches its pattern.
    """
    correcto = True
    if (coincidencia(patronCod_Historia, cd) is None):
        correcto = False
        messagebox.showerror("Error", "El código ingreso debe ser numérico")
    if (coincidencia(patronfecha, fe) is None):
        correcto = False
        messagebox.showerror("Error", "el campofecha debe ser dd/mm/yyyy")
    if (coincidencia(patronCod_Historia, nup) is None):
        correcto = False
        messagebox.showerror("Error", "el campo numero de planta tiene que ser numérico")
    if (coincidencia(patronCod_Historia, nuc) is None):
        correcto = False
        messagebox.showerror("Error", "el campo numero de cama tiene que ser numérico")
    return correcto
def verificacionMedico(cm, nm, ap, nuc):
    """Validate the doctor form fields; show one error dialog per failing field.

    cm = doctor code, nm = name, ap = surnames, nuc = membership number.
    Returns True only when every field matches its pattern.
    """
    correcto = True
    apellidolista = ap.split(" ")  # each surname word is validated separately
    if (coincidencia(patronCod_Historia, cm) is None):
        correcto = False
        messagebox.showerror("Error", "El código de medico debe ser numérico")
    if (coincidencia(patronNomApe, nm) is None):
        correcto = False
        messagebox.showerror("Error", "el nombre tiene que ser en la primera en maysuculas y solo contener letras")
    for i in apellidolista:
        if coincidencia(patronNomApe, i) is None:
            correcto = False
            messagebox.showerror("Error", "El apellido solo debe contener letras y la primera en maysuculas")
    if (coincidencia(patronCod_Historia, nuc) is None):
        correcto = False
        messagebox.showerror("Error", "el campo número de colegiado tiene que ser numérico")
    return correcto
def limpiar():
    """Reset every patient form field to an empty string."""
    for campo in (codpaciente, ss, nomb, ape1, dom, pob, prov, codp, tel, numi, obs):
        campo.set("")
def limpiaringreso():
    """Reset every admission form field to an empty string."""
    for campo in (codingreso, proc, fech, numplan, numcam, observaci):
        campo.set("")
def limpiarMedico():
    """Reset every doctor form field to an empty string."""
    for campo in (codmed, nom, ape, esp, numcol, cargo, obser):
        campo.set("")
def mostrardatos():
    """Reload the patient Treeview from the database, newest codpac on top."""
    tabla.delete(*tabla.get_children())  # clear the current rows first
    miConexion = MySQLdb.connect(host='localhost', user='root', passwd='root', db='hospital')
    cur = miConexion.cursor()
    cur.execute("SELECT * FROM paciente order by codpac desc ")
    for row in cur.fetchall():
        tabla.insert("", 0, values=row)  # insert at index 0 reverses the DESC order
    miConexion.close()
def mostrardatosIngresos():
    """Reload the admissions Treeview from the database, newest coding on top."""
    tablaIngresos.delete(*tablaIngresos.get_children())  # clear the current rows first
    miConexion = MySQLdb.connect(host='localhost', user='root', passwd='root', db='hospital')
    cur = miConexion.cursor()
    cur.execute("SELECT * FROM ingresos order by coding desc ")
    for row in cur.fetchall():
        tablaIngresos.insert("", 0, values=row)  # insert at index 0 reverses the DESC order
    miConexion.close()
def mostrardatosMedicos():
    """Reload the doctors Treeview from the database, newest codmed on top."""
    tablaMedicos.delete(*tablaMedicos.get_children())  # clear the current rows first
    miConexion = MySQLdb.connect(host='localhost', user='root', passwd='root', db='hospital')
    cur = miConexion.cursor()
    cur.execute("SELECT * FROM medicos order by codmed desc ")
    for row in cur.fetchall():
        tablaMedicos.insert("", 0, values=row)  # insert at index 0 reverses the DESC order
    miConexion.close()
def insertapaciente():
    """Insert a new patient built from the form fields, then refresh the table.

    Only runs the INSERT when verificacion() accepts every field.
    NOTE(review): the column list says domicilio,provincia,poblacion but the
    values are (domi, poblacio, provin) - provincia/poblacion are swapped.
    The same swap appears in modificapaciente and seleccion, so the app is
    self-consistent; confirm against the real table schema before changing.
    """
    codi = codpaciente.get()
    seg = (ss.get())
    nombre = (nomb.get())
    apellido = ape1.get()
    domi = dom.get()
    poblacio = pob.get()
    provin = prov.get()
    cp = codp.get()
    tele = tel.get()
    numhis = numi.get()
    obcer = obs.get()
    if (verificacion(codi, seg, nombre, apellido, cp, numhis, tele)):
        miConexion = MySQLdb.connect(host='localhost', user='root', passwd='root', db='hospital')
        cur = miConexion.cursor()
        sql = "INSERT INTO paciente (codpac, numseg,nombre,apellido,domicilio,provincia,poblacion,cp,telefono,numhist,observa) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        datos = (codi, seg, nombre, apellido, domi, poblacio, provin, cp, tele, numhis, obcer)
        cur.execute(sql, datos)
        miConexion.commit()
        miConexion.close()
        mostrardatos()
        limpiar()
def insertarMedicos():
    """Insert a new doctor built from the form fields, then refresh the table.

    Only runs the INSERT when verificacionMedico() accepts the fields.
    """
    cm = codmed.get()
    nm = nom.get()
    ap = ape.get()
    es = esp.get()
    nuc = numcol.get()
    car = cargo.get()
    ob = obser.get()
    if (verificacionMedico(cm, nm, ap, nuc)):
        miConexion = MySQLdb.connect(host='localhost', user='root', passwd='root', db='hospital')
        cur = miConexion.cursor()
        sql = "INSERT INTO medicos (codmed, nombre,apellidos,especialidad,num_colegiado,cargo,observaciones) VALUES (%s,%s,%s,%s,%s,%s,%s)"
        datos = (cm, nm, ap, es, nuc, car, ob)
        cur.execute(sql, datos)
        miConexion.commit()
        miConexion.close()
        mostrardatosMedicos()
        limpiarMedico()
def modificapaciente():
    """Update the patient identified by 'codpaciente' with the form values.

    Only runs the UPDATE when verificacion() accepts every field.
    NOTE(review): provincia/poblacion are bound to the swapped variables, the
    same way insertapaciente does it, so the round trip stays consistent.
    """
    codi = codpaciente.get()
    seg = (ss.get())
    nombre = (nomb.get())
    apellido = ape1.get()
    domi = dom.get()
    poblacio = pob.get()
    provin = prov.get()
    cp = codp.get()
    tele = tel.get()
    numhis = numi.get()
    obcer = obs.get()
    if (verificacion(codi, seg, nombre, apellido, cp, numhis, tele)):
        miConexion = MySQLdb.connect(host='localhost', user='root', passwd='root', db='hospital')
        cur = miConexion.cursor()
        sql = "UPDATE paciente SET numseg=%s,nombre=%s,apellido=%s,domicilio=%s,provincia=%s,poblacion=%s,cp=%s,telefono=%s,numhist=%s,observa=%s WHERE codpac = %s"
        datos = (seg, nombre, apellido, domi, poblacio, provin, cp, tele, numhis, obcer, codi)
        cur.execute(sql, datos)
        miConexion.commit()
        miConexion.close()
        mostrardatos()
        limpiar()
def modificarMedicos():
    """Update the doctor identified by the 'codmed' field with the form values.

    Only runs the UPDATE when verificacionMedico() accepts the fields.

    BUG FIX: the original read the table selection into a fallback guarded by
    ``if codi == 0`` - comparing a string to the int 0 is never true, and the
    UPDATE used ``cm`` anyway, so that whole branch was dead code; it has
    been removed.
    """
    cm = codmed.get()
    nm = nom.get()
    ap = ape.get()
    es = esp.get()
    nuc = numcol.get()
    car = cargo.get()
    ob = obser.get()
    if (verificacionMedico(cm, nm, ap, nuc)):
        miConexion = MySQLdb.connect(host='localhost', user='root', passwd='root', db='hospital')
        cur = miConexion.cursor()
        sql = "UPDATE medicos SET nombre=%s,apellidos=%s,especialidad=%s,num_colegiado=%s,cargo=%s,observaciones=%s WHERE codmed = %s"
        datos = (nm, ap, es, nuc, car, ob, cm)
        cur.execute(sql, datos)
        miConexion.commit()
        miConexion.close()
        mostrardatosMedicos()
        limpiarMedico()
def borrar():
    """Delete the patient whose code is typed in the 'codpaciente' field.

    The table selection is read first but the entry value always wins when it
    is non-empty (and the regex check rejects an empty entry anyway).
    """
    try:
        name = tabla.item(tabla.selection())
        name = name.get('values')
        name = str(name[0])
    except:
        print("no has seleccionado nada")
    codi = codpaciente.get()
    if (coincidencia(patronCod_Historia, codi) is None):
        messagebox.showerror("Error", "El código paciente debe ser numérico")
    else:
        try:
            if (codi != ""):
                name = codi
            miConexion = MySQLdb.connect(host='localhost', user='root', passwd='root', db='hospital')
            cur = miConexion.cursor()
            sql = "delete from paciente WHERE codpac = %s"
            # BUG FIX: pass the id as a one-element sequence; MySQLdb maps a
            # bare string argument character by character, so multi-digit ids
            # failed. Matches the "[datos]" style used in calcularPorcentaje.
            cur.execute(sql, [name])
            miConexion.commit()
            miConexion.close()
            mostrardatos()
            limpiar()
        except:
            messagebox.showerror("Error", "no existe ese id")
def borrarMedicos():
    """Delete the doctor whose code is typed in the 'codmed' field."""
    try:
        name = tablaMedicos.item(tablaMedicos.selection())
        name = name.get('values')
        name = str(name[0])
    except:
        print("no has seleccionado nada")
    codi = codmed.get()
    if (coincidencia(patronCod_Historia, codi) is None):
        messagebox.showerror("Error", "El código debe ser numérico")
    else:
        try:
            if (codi != ""):
                name = codi
            miConexion = MySQLdb.connect(host='localhost', user='root', passwd='root', db='hospital')
            cur = miConexion.cursor()
            sql = "delete from medicos WHERE codmed = %s"
            # BUG FIX: pass the id as a one-element sequence; MySQLdb maps a
            # bare string argument character by character.
            cur.execute(sql, [codi])
            miConexion.commit()
            miConexion.close()
            mostrardatosMedicos()
            limpiarMedico()
        except:
            messagebox.showerror("Error", "no existe ese id")
def insertarIngresos():
    """Insert a new admission built from the form fields, then refresh the table.

    Only runs the INSERT when verificacionIngreso() accepts the fields.
    """
    cd = codingreso.get()
    pr = proc.get()
    fe = fech.get()
    nup = numplan.get()
    nuc = numcam.get()
    ob = observaci.get()
    if (verificacionIngreso(cd, fe, nup, nuc)):
        miConexion = MySQLdb.connect(host='localhost', user='root', passwd='root', db='hospital')
        cur = miConexion.cursor()
        sql = "INSERT INTO ingresos (coding, procedencia,fecha_ingreso,num_planta,num_cama,observaciones) VALUES (%s,%s,%s,%s,%s,%s)"
        datos = (cd, pr, fe, nup, nuc, ob)
        cur.execute(sql, datos)
        miConexion.commit()
        miConexion.close()
        mostrardatosIngresos()
        limpiaringreso()
def modificarIngresos():
    """Update the admission identified by 'codingreso' with the form values.

    Falls back to the id of the row selected in the table when the entry
    field is empty.

    BUG FIX: the fallback was guarded by ``if codi == 0``, comparing a string
    to the int 0 - never true, so the selected-row fallback never fired; it
    now triggers on an empty entry.
    """
    cd = codingreso.get()
    pr = proc.get()
    fe = fech.get()
    nup = numplan.get()
    nuc = numcam.get()
    ob = observaci.get()
    if (verificacionIngreso(cd, fe, nup, nuc)):
        try:
            name = tablaIngresos.item(tablaIngresos.selection())
            name = name.get('values')
            name = str(name[0])
        except:
            print("no has seleccionado nada")
        codi = codingreso.get()
        try:
            if not codi:
                codi = name
            miConexion = MySQLdb.connect(host='localhost', user='root', passwd='root', db='hospital')
            cur = miConexion.cursor()
            sql = "UPDATE ingresos SET procedencia=%s,fecha_ingreso=%s,num_planta=%s,num_cama=%s,observaciones=%s WHERE coding = %s"
            datos = (pr, fe, nup, nuc, ob, codi)
            cur.execute(sql, datos)
            miConexion.commit()
            miConexion.close()
            mostrardatosIngresos()
            limpiaringreso()
        except:
            messagebox.showerror("Error", "no existe ese id")
def borrarIngresos():
    """Delete the admission whose code is typed in the 'codingreso' field."""
    try:
        name = tablaIngresos.item(tablaIngresos.selection())
        name = name.get('values')
        name = str(name[0])
    except:
        print("no has seleccionado nada")
    codi = codingreso.get()
    if (coincidencia(patronCod_Historia, codi) is None):
        messagebox.showerror("Error", "El código debe ser numérico")
    else:
        try:
            if (codi != ""):
                name = codi
            miConexion = MySQLdb.connect(host='localhost', user='root', passwd='root', db='hospital')
            cur = miConexion.cursor()
            sql = "delete from ingresos WHERE coding = %s"
            # BUG FIX: pass the id as a one-element sequence; MySQLdb maps a
            # bare string argument character by character.
            cur.execute(sql, [codi])
            miConexion.commit()
            miConexion.close()
            mostrardatosIngresos()
            limpiaringreso()
        except:
            messagebox.showerror("Error", "El id no existe")
# Main window and tabbed notebook setup.
ventana = Tk()
ventana.title("Hospital")
notebook = ttk.Notebook(ventana)
notebook.pack(fill='both', expand='yes')
# Tab creation: one frame per entity (patients / admissions / doctors).
pes0 = ttk.Frame(notebook)
pes1 = ttk.Frame(notebook)
pes2 = ttk.Frame(notebook)
notebook.add(pes0, text='Paciente')
notebook.add(pes1, text='Ingresos')
notebook.add(pes2, text='Médico')
# etiquetas paciente
codpa = Label((pes0), text='Codigo pac.').place(x=20, y=10)
numSeg = Label((pes0), text='Nº de Seguridad Social').place(x=20, y=40)
nombre = Label((pes0), text='Nombre').place(x=20, y=70)
apellido = Label((pes0), text='Apellidos').place(x=20, y=100)
domicilio = Label((pes0), text='Domicilio').place(x=20, y=130)
poblacion = Label((pes0), text='Población').place(x=20, y=160)
provincia = Label((pes0), text='Provincia').place(x=20, y=190)
cp = Label((pes0), text='Código postal').place(x=20, y=220)
telefono = Label((pes0), text='Nº de teléfono').place(x=20, y=250)
numHistoria = Label((pes0), text='Nº historial clínico').place(x=20, y=280)
obeserva = Label((pes0), text='Observaciones').place(x=20, y=310)
# etiquetas ingresos
codingres = Label((pes1), text='Codigo ing.').place(x=20, y=10)
proce = Label((pes1), text='Procedencia').place(x=20, y=40)
fecha_ing = Label((pes1), text='Fecha de ingreso').place(x=20, y=70)
num_planta = Label((pes1), text='Número de planta').place(x=20, y=100)
num_cama = Label((pes1), text='Número de cama').place(x=20, y=130)
observa = Label((pes1), text='Observaciones').place(x=20, y=160)
med = Label((pes1), text='Media ingreso/dia').place(x=20, y=190)
por = Label((pes1), text='Porcentaje ingresos/día').place(x=20, y=220)
# variables para recoger el texto
codpaciente = StringVar()
ss = StringVar()
nomb = StringVar()
ape1 = StringVar()
dom = StringVar()
pob = StringVar()
prov = StringVar()
codp = StringVar()
tel = StringVar()
numi = StringVar()
obs = StringVar()
# campos de texto
codpatexto = ttk.Entry(pes0, textvariable=codpaciente).place(x=150, y=10)
numSegText = ttk.Entry(pes0, textvariable=ss).place(x=150, y=40)
nombreTexto = ttk.Entry(pes0, textvariable=nomb).place(x=150, y=70)
apellidoTexto = ttk.Entry(pes0, textvariable=ape1).place(x=150, y=100)
domicilioTexto = ttk.Entry(pes0, textvariable=dom).place(x=150, y=130)
poblacionTexto = ttk.Entry(pes0, textvariable=pob).place(x=150, y=160)
provinciaTexto = ttk.Entry(pes0, textvariable=prov).place(x=150, y=190)
cpTexto = ttk.Entry(pes0, textvariable=codp).place(x=150, y=220)
telefonoTexto = ttk.Entry(pes0, textvariable=tel).place(x=150, y=250)
numHistoriaTexto = ttk.Entry(pes0, textvariable=numi).place(x=150, y=280)
obeservaTexto = ttk.Entry(pes0, textvariable=obs).place(x=150, y=310)
# variables para recoger pestaña ingresos
codingreso = StringVar()
proc = StringVar()
fech = StringVar()
numplan = StringVar()
numcam = StringVar()
observaci = StringVar()
medi = StringVar()
porcen = StringVar()
# campos de texto ingresos
codingTexto = ttk.Entry(pes1, textvariable=codingreso).place(x=150, y=10)
proceTexto = ttk.Entry(pes1, textvariable=proc).place(x=150, y=40)
fechaTexto = ttk.Entry(pes1, textvariable=fech).place(x=150, y=70)
numPlantaTexto = ttk.Entry(pes1, textvariable=numplan).place(x=150, y=100)
numCamaTexto = ttk.Entry(pes1, textvariable=numcam).place(x=150, y=130)
observTexto = ttk.Entry(pes1, textvariable=observaci).place(x=150, y=160)
meddia = ttk.Entry(pes1, textvariable=medi, state=DISABLED).place(x=150, y=190)
porcentaje = ttk.Entry(pes1, textvariable=porcen, state=DISABLED).place(x=150, y=220)
# formación de tabla
style = ttk.Style()
style.configure("Treeview.Heading", font=(None, 8))
tabla = ttk.Treeview(pes0, colum=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))
tabla.column(1, width=50)
tabla.column(2, width=60)
tabla.column(3, width=60)
tabla.column(4, width=60)
tabla.column(5, width=60)
tabla.column(6, width=60)
tabla.column(7, width=80)
tabla.column(8, width=60)
tabla.column(9, width=80)
tabla.column(10, width=60)
tabla.column(11, width=90)
tabla.heading(1, text="codpac")
tabla.heading(2, text="numseg")
tabla.heading(3, text="nombre")
tabla.heading(4, text="apellido")
tabla.heading(5, text="domicilio")
tabla.heading(6, text="provincia")
tabla.heading(7, text="poblacion")
tabla.heading(8, text="cp")
tabla.heading(9, text="telefono")
tabla.heading(10, text="numHist")
tabla.heading(11, text="oserva")
tabla['show'] = 'headings'
tabla.bind('<ButtonRelease-1>', seleccion)
# formación de tabla ingresos
style = ttk.Style()
style.configure("Treeview.Heading", font=(None, 8))
tablaIngresos = ttk.Treeview(pes1, colum=(1, 2, 3, 4, 5, 6))
tablaIngresos.column(1, width=60)
tablaIngresos.column(2, width=90)
tablaIngresos.column(3, width=90)
tablaIngresos.column(4, width=70)
tablaIngresos.column(5, width=60)
tablaIngresos.column(6, width=90)
tablaIngresos.heading(1, text="coding")
tablaIngresos.heading(2, text="procedencia")
tablaIngresos.heading(3, text="fecha_ingreso")
tablaIngresos.heading(4, text="num_planta")
tablaIngresos.heading(5, text="num_cama")
tablaIngresos.heading(6, text="observaciones")
tablaIngresos.bind('<ButtonRelease-1>', seleccionIngreso)
tablaIngresos['show'] = 'headings'
# botones pacientes
botoninserta = Button(pes0, text="Inserta", command=insertapaciente, height=2, width=7)
botoninserta.place(x=330, y=300)
botonmodifica = Button(pes0, text="Modifica", command=modificapaciente, height=2, width=7)
botonmodifica.place(x=400, y=300)
botonborra = Button(pes0, text="Borra", command=borrar, height=2, width=7)
botonborra.place(x=470, y=300)
botongrafica = Button(pes0, text="Gráfico", command=mostrarGrafica, height=2, width=7)
botongrafica.place(x=540, y=300)
tabla.place(x=40, y=380)
# botones ingresos
botonInsertaIngreso = Button(pes1, text="Insertar", command=insertarIngresos, height=2, width=7)
botonInsertaIngreso.place(x=70, y=270)
botonModificaIngreso = Button(pes1, text="Modificar", command=modificarIngresos, height=2, width=7)
botonModificaIngreso.place(x=150, y=270)
botonBorraIngreso = Button(pes1, text="Borrar", command=borrarIngresos, height=2, width=7)
botonBorraIngreso.place(x=230, y=270)
botonamedia = Button(pes1, text="Media", command=media, height=2, width=7)
botonamedia.place(x=310, y=270)
botonporcen = Button(pes1, text="Porcentaje", command=calcularPorcentaje, height=2, width=7)
botonporcen.place(x=390, y=270)
tablaIngresos.place(x=50, y=350)
print(tabla.selection_set())
print(tabla.focus())
mostrardatos()
print(tablaIngresos.selection_set())
print(tablaIngresos.focus())
mostrardatosIngresos()
# etiquetas medicos
codme = Label((pes2), text='Codigo med.').place(x=20, y=10)
nom = Label((pes2), text='Nombre').place(x=20, y=40)
ape = Label((pes2), text='Apellidos').place(x=20, y=70)
esp = Label((pes2), text='Especialidad').place(x=20, y=100)
num_col = Label((pes2), text='Número de colegiado').place(x=20, y=130)
cargo = Label((pes2), text='Cargo').place(x=20, y=160)
obser = Label((pes2), text='Observaciones').place(x=20, y=190)
# variables para recoger pestaña medicos
codmed = StringVar()
nom = StringVar()
ape = StringVar()
esp = StringVar()
numcol = StringVar()
cargo = StringVar()
obser = StringVar()
# campos de texto medicos
codmedTexto = ttk.Entry(pes2, textvariable=codmed).place(x=150, y=10)
nomTexto = ttk.Entry(pes2, textvariable=nom).place(x=150, y=40)
apeTexto = ttk.Entry(pes2, textvariable=ape).place(x=150, y=70)
espTexto = ttk.Entry(pes2, textvariable=esp).place(x=150, y=100)
numcolTexto = ttk.Entry(pes2, textvariable=numcol).place(x=150, y=130)
cargoTexto = ttk.Entry(pes2, textvariable=cargo).place(x=150, y=160)
obserTexto = ttk.Entry(pes2, textvariable=obser).place(x=150, y=190)
# formación de tabla medicos
style = ttk.Style()
style.configure("Treeview.Heading", font=(None, 8))
tablaMedicos = ttk.Treeview(pes2, colum=(1, 2, 3, 4, 5, 6, 7))
tablaMedicos.column(1, width=60)
tablaMedicos.column(2, width=60)
tablaMedicos.column(3, width=60)
tablaMedicos.column(4, width=60)
tablaMedicos.column(5, width=60)
tablaMedicos.column(6, width=60)
tablaMedicos.column(7, width=60)
tablaMedicos.heading(1, text="codmed")
tablaMedicos.heading(2, text="nombre")
tablaMedicos.heading(3, text="apellidos")
tablaMedicos.heading(4, text="especialidad")
tablaMedicos.heading(5, text="num_colegiado")
tablaMedicos.heading(6, text="cargo")
tablaMedicos.heading(7, text="observaciones")
tablaMedicos['show'] = 'headings'
tablaMedicos.bind('<ButtonRelease-1>', seleccionMedico)
# botones medicos
botonInsertaMedico = Button(pes2, text="Insertar", command=insertarMedicos, height=2, width=7)
botonInsertaMedico.place(x=70, y=250)
botonModificaMedico = Button(pes2, text="Modificar", command=modificarMedicos, height=2, width=7)
botonModificaMedico.place(x=150, y=250)
botonBorraMedico = Button(pes2, text="Borrar", command=borrarMedicos, height=2, width=7)
botonBorraMedico.place(x=230, y=250)
tablaMedicos.place(x=90, y=350)
print(tablaMedicos.selection_set())
print(tablaMedicos.focus())
mostrardatosMedicos()
ventana.geometry("800x700")
ventana.mainloop()
| daguilerap/Hospital | proyectofinal/main.py | main.py | py | 25,321 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.... |
71884000353 | from __future__ import absolute_import, division
from collections import defaultdict
from .components import Edge, Node
from .exceptions import InvalidNodeTypeError, MaxNodeError, GraphTypeError
from .utils import check_cycle
from .algorithms._search import BFS, DFS
from .algorithms._sort import topological_sort
from ._vis.layout import plot_graph_directed, plot_graph_undirected
from graphnet import VECTOR, SCALAR
import matplotlib.pyplot as plt
import numpy as np
class Graph(object):
"""
Implements the Graph data structure which contains nodes or vertices linked with edges between them
Graph helps in managing relationship between nodes in a network with their edges.
Parameters
----------
max_node:int, float, optional, default=infinity
the maximum amount of nodes that can be added to the graph,
default is infinity which means in theory nodes can be added
to the graph indefinitely.
max_edge:int, float, optional, default=infinity
the maximum amount of edges that can be added to the graph,
default is infinity
type:{'scalar'or'S', 'vector'or'V'},optional, default='scalar'
this specifies the graph type, the are two graph types 'vector'
which represents directional graphs and 'scalar' which represents
un-directional graphs. the type of of graph determines the behavior
of the graph instance and which algorithm work with it. optionally
you can substutute 'scalar' with 'S' and 'vector' with 'V'.
ref:str,optional,default="value"
the attribute used by the graph for referencing the node object passed in
for identification and accessing as key value for the graph. change this
if you are using a inherited node object to the object identifier else if
you're using the built in Node class directly then leave it as default.
Attributes
----------
edges:list
This is a list of all edges in the graph instance
connections:dict<defaultdict>
A dictionary thats maps each node to its neighbor with an edge, a node
in the connection dictionary contains a default dict and in the default dict
the adjacent node is mapped with its edge object and if there is no relationship
with the two node 0 will be returned.
graph_matrix: numpy.array()
this is a adjacent matrix form of the graph, it is read only.
get_node: list
lits of all the node object in the graph, read only.
"""
def __init__(self, max_node=float("inf"), max_edge=float("inf"), type=SCALAR, ref="value"):
self.__max_node = max_node
self.max_edge = max_edge
self.type = type
self.ref = ref
self.edges = []
self.connections = {}
self.__nodes = []
self.__node_map = {}
def __len__(self):
return len(self.__nodes)
def __iter__(self):
self.__counter = 0
return self
def __next__(self):
if self.__counter < self.__len__():
res = self.__nodes[self.__counter]
self.__counter += 1
return res
else:
raise StopIteration
def __getitem__(self, key):
return self.__node_map[key]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.clear()
def add_node(self, node):
"""
Adds an arbitrary object or Node object to the graph,
all nodes added should be unique.
parameters
----------
node: str, int, float, Node
"""
if self.__len__() <= self.__max_node:
if type(node) in [str, int, float]:
node = Node(node)
if node not in self.__nodes:
if type(node) == Node or issubclass(node.__class__, Node):
self.__nodes.append(node)
node_id = self.get_node_id(node)
self.__node_map[node_id] = node
else:
raise InvalidNodeTypeError(
"Expected object of type str, float, int, Node or subclass of Node but %s was given." % type(node))
self.connections[node_id] = defaultdict(int)
else:
raise ValueError("Node instance already in graph network")
else:
raise MaxNodeError(
"Graph max size exceeded, expected %d node." % (self.__max_node))
def add_edge(self, _from, _to, weight=1):
"""
Adds a edge to the graph connecting the _from and _to nodes,
the values passed to the parameters _from and _to must be a
node in the graph or a node in the graph must contain the
value passed in.
if the type of the graph object is scalar an edge will be added
from _to to _from.
parameters
----------
_from:int,str,float,Node
this is the source from where the edge starts from
_to:int,str,float,Node
the destination of the edge
weight:int, str, float, default=1
this is the weight of the edge object connecting the nodes
the default value is 1.
"""
if len(self.connections) <= self.max_edge:
if type(_from) == Node or issubclass(_from.__class__, Node):
_from = self.get_node_id(_from)
if type(_to) == Node or issubclass(_to.__class__, Node):
_to = self.get_node_id(_to)
node = self[_from]
node_to = self[_to]
edge = Edge(node, node_to, weight)
self.connections[_from][_to] = edge
node.add_node(node_to)
self.edges.append(edge)
if self.type == SCALAR:
edge2 = Edge(_to, _from, weight)
self.connections[_to][_from] = edge2
node_to.add_node(node)
self.edges.append(edge2)
else:
raise MaxNodeError(
"Graph max size exceeded, expected %d node." % (self.max_edge))
def add_nodes_from_iterable(self, iterable):
"""
Adds nodes from a an iterable to the graph.
Parameters
----------
iterable:iter
an iterable object containing nodes to be added to the graph
"""
for node in iterable:
self.add_node(node)
def add_edges_from_iterable(self, iterable):
"""
Adds nodes edge a an iterable to the graph.
Parameters
----------
iterable:iter
an iterable object containing a iterable with size of 3 or 2
where the first two values are values of nodes in the graph
and if there's a third value it will be used as the weight of
the edge to be added to the graph
"""
for e in iterable:
self.add_edge(*e)
@property
def graph_matrix(self):
nodes = self.__nodes
self.__adj_matrix = np.zeros((self.__len__(), self.__len__()))
for i in range(self.__len__()):
id_i = self.get_node_id(nodes[i])
for j in range(self.__len__()):
id_j = self.get_node_id(nodes[j])
edge = self.connections[id_i][id_j] if not self.connections[id_i][id_j] else self.connections[id_i][id_j].weight
self.__adj_matrix[i, j] = edge
return self.__adj_matrix
@property
def get_nodes(self):
return self.__nodes
def from_dict(self, dictionary, weights=1):
"""
creates and add node from a dictionary. The dictionary passed in must
have a key with an iterable value which contains all adjacents node, and
node in the iterable must be in the dictionary or graph. The weight will be
uniform for all nodes.
Parmeters
---------
dictionary: dict
dictionary object thats holds weights and their connections
weights: int, str, float
uniform weight passed to all edges.
"""
self.add_nodes_from_iterable(list(dictionary.keys()))
for key in dictionary:
for edge in dictionary[key]:
self.add_edge(key, edge, weights)
def is_cyclic(self):
"""
Checks if the graph contains a cycle, if the graph type is scalar
returns
-------
bool
"""
if self.type != VECTOR:
raise GraphTypeError(
"cyclic check only works for vector type graphs")
visited = {}
rec_stack = {}
for node in self:
visited[node] = False
rec_stack[node] = False
for source in self:
if check_cycle(source, visited, rec_stack):
return True
return False
def clear(self):
"""
Clears all the graph content
"""
self.__nodes.clear()
self.connections = {}
self.edges.clear()
def is_connected(self):
"""
Checks if a scalar graph is connected or not.
returns
-------
bool
"""
if self.type == SCALAR:
traverse = self.BFS()
for node in self.__nodes:
if not node in traverse:
return False
return True
else:
raise GraphTypeError(
"Invalid graph type %s expected scalar" % (self.type))
def display(self, weighted=False, weight_color=False, arrow_color=True, layout="polygon", polygon_radius=5, attr=None, ax=None):
"""
creates a plot of the graph.
parameters
----------
weighted:bool, optional, default=False
if true weights will be displayed.
weight_color:bool, optional, default=False
if true the weights will be colored with the edge object color attribute.
arrow_color:bool, optional, default=True
if true the arrow will be colored with the edge object color attribute.
layout: optional, {random, polygon}, default=polygon
The layout used to arrange the nodes in the plot
polygon_radius: int, float, default=5
if polygon layout is used this defines the radius of the polygon shape.
attr:None, str, int, object, default=None
The attribute of the Node object to be used as label. if omitted the default value
will be the ref attribute of the graph.
ax:.axes.Axes, default=None
An axis object to plot the graph, if not specified a default axis will be created.
"""
if attr == None:
attr = self.ref
if not ax:
_, ax = plt.subplots(1, figsize=(20, 20))
if self.type == VECTOR:
plot_graph_directed(self, ax, len(
self), weighted, weight_color, arrow_color, layout, polygon_radius, attr)
elif self.type == SCALAR:
plot_graph_undirected(self, ax, len(
self), weighted, weight_color, arrow_color, layout, polygon_radius, attr)
def remove_edge(self, _from, _to):
"""
removes edge connecting to nodes if it exist.
Parameters
----------
_from:int, str, float
_to:int, str, float
"""
del self.connections[_from][_to]
for i in range(len(self.edges)):
if self.edges[i]._from == _from and self.edges[i]._to == _to:
self.edges.pop(i)
break
src = self[_from]
dest = self[_to]
for i in range(len(src.adjacent_nodes)):
if dest == src.adjacent_nodes[i]:
src.adjacent_nodes.pop(i)
break
def get_node_id(self, node):
"""
gets the value of the node object used in identifying
the node
Parameters
----------
node:Node
returns
-------
value:str, int, float, object
"""
for vertex in self.__nodes:
if vertex == node:
v = node
break
return eval('v.%s' % self.ref)
def topological_sort(self):
"""
Sorts the nodes if graph type is vector
returns
-------
res:List
a list containing all the node in the graph instance
in a sorted form
"""
return topological_sort(self)
def BFS(self, source=None, key=None):
"""
An Impelemetaion of breath-first-search for tranversing a
graph of getting a node.
Parameters
----------
source:int, str, float, optional
Value of the node to start the tranverse. if omitted the method
uses a random node as source.
key:int, str, float, optional
Value of the node to stop the tranverse. if omitted the method
stops when all node in the graph are tranversed.
returns
-------
value: list
A list of the path tranversed in order from source to key.
"""
return BFS(self, source, key)
def DFS(self, source=None, key=None):
"""
An Impelemetaion of depth-first-search for tranversing a
graph of getting a node.
Parameters
----------
source:int, str, float, optional
Value of the node to start the tranverse. if omitted the method
uses the first node in the graph as source.
key:int, str, float, optional
Value of the node to stop the tranverse. if omitted the method
stops when all node in the graph are tranversed.
returns
-------
value: list
A list of the path traversed in order from source to key.
"""
return DFS(self, source, key)
class GraphPriorityQueue:
"""
A priority queue data structure which stores node
and get them based on priority. if state is true nodes
will be gotten by priority and if their state value evaluate
to true.
Parameters
----------
graph: Graph
graph object whose node will be stored in the queue.
type: {"min", "max"}, default="min"
the type of priority based on how the values in the
graph will be retrived, if min the smallest value will
be prioritized and if max the largest value will be prioritized.
state: boolean, default=False
this specifies if state will be used in the queue. If true an
attribute status will be created to hold the states of all the
nodes.
Attributes
---------
type: {"min", "max"}, default="min"
queue: dict
this holds node and value pair, the value is what is used in
as priority in getting items.
status: dict
if state is true this will be created and hold a dictionary of
nodes mapped with boolean.
"""
def __init__(self, graph, type="min", state=False):
if type == 'min' or type == 'max':
self.type = type
else:
raise TypeError("type attribute most be 'min' or 'max'")
self.__graph = graph
self._top = float('inf') if self.type == 'min' else -float('inf')
self.queue = dict()
self.__state = state
for node in self.__graph:
self.queue[node] = float('inf')
if self.__state:
self.status = dict()
for node in self.__graph:
self.status[node] = False
def __repr__(self):
return str(list(self.queue.values()))
def get(self):
"""
gets a node based on priority if the queue isn't empty
returns: Node
if the queue isn't empty returns a Node object else it
returns None.
"""
if self.__state:
if all(self.status.values()):
return None
if len(self.queue) != 0:
sign = "<=" if self.type == "min" else ">=" if self.type == "max" else ''
state = '' if not self.__state else 'and (not self.status[node])'
for node in self.queue:
if eval("(self.queue[node] %s self._top %s)" % (sign, state)):
self._top = self.queue[node]
res = node
del self.queue[res]
if self.__state:
del self.status[res]
self._top = float('inf') if self.type == 'min' else -float('inf')
return res
return None
def set_status(self, node, value):
"""
Tries to set the status of the node if state is true
Parameters
----------
node: Node
node object to change state
value: boolean
value to change the node state.
"""
assert (value == True or value == False) and (self.__state)
self.status[node] = value
def enqueue(self, node, value=0):
"""
Inserts a node to the queue with the value to be used for
prioritizing.
Parameters
----------
node: Node
node object to insert
value:int, float, default=0
value to be used for prioritizing the node object.
"""
self.queue[node] = value
def is_empty(self):
"""
Checks if the queue is empty
returns
-------
out:boolean
if True queue is empty else it's not.
"""
if len(self.queue) != 0:
return False
return True
| Fredpwol/graphnet | graphnet/graph.py | graph.py | py | 18,096 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "graphnet.SCALAR",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "components.Node",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "components.Node",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "exceptions.InvalidN... |
13006443464 | from telethon import TelegramClient, events, sync,utils
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.functions.messages import GetDialogsRequest
from telethon.tl.types import InputPeerEmpty, InputPeerChannel, InputPeerUser
from telethon.errors.rpcerrorlist import (PeerFloodError, UserNotMutualContactError ,
UserPrivacyRestrictedError, UserChannelsTooMuchError,
UserBotError, InputUserDeactivatedError)
from telethon.tl.functions.channels import InviteToChannelRequest
import time,os,random,csv,sys
r= "\u001b[31;1m"
a= "\u001b[32m"
y = "\u001b[33;1m"
b="\u001b[34;1m"
m="\u001b[35;1m"
c="\u001b[36;1m"
clear = lambda:os.system('clear')
__banner__ = a+"""*\t🔰DARKNET_OFF1CIAL🔰*
xxxx xxxx
x x x x
x x x x
xx xx
x x x x
x x x x
x x x x
x xxx x xxx
x xxxx x xxxx
x xxxx x xxxx
xxx xxx xxx
x x
x x
xx xxx xx
xx xx
xxx xxx
xx xx
xx xx
xxxx xxxx
xxx xxx
xxxx xxx
\tᎢᗴᖇᗰᑌ᙭ ᑌᏃ ᑭᖇᏆᐯᗩᎢᗴ"""
inf = (b+'@'+y+'d'+a+'a'+b+'r'+y+'k'+m+'n'+c+'e'+r+'t'+y+'_'+y+'o'+a+'f'+b+'f'+y+'1'+m+'c'+c+'a'+r+'l')
el=0
def Sleep(timE):
try:
time.sleep(timE)
except KeyboardInterrupt:
print(r+" KeyboardInterrupt , ........")
def info():
print("")
print("")
print(__banner__)
print(inf)
print("")
print("")
clear()
info()
def ospath():
o=int(input(b+" Nechta telegram akauntingiz bor ? : "))
for po in range(o):
if os.path.isfile('multi_log.txt'):
with open('multi_log.txt', 'r') as f:
data = f.readlines()
v=int(len(data)/2)
z=v
else:
z=0
api_id= input(b+' Telegram api_id_{}: '.format(z+1))
api_hash= input(' Telegram api_hash_{}: '.format(z+1))
with open('multi_log.txt', 'a') as f:
f.write(api_id+'\n'+api_hash+'\n')
client = TelegramClient("DarknetHack{}".format(z), api_id, api_hash)
client.start()
Sleep(1)
clear()
info()
client.disconnect()
if os.path.isfile('multi_log.txt'):
xc=input(b+" Oxirgi amalni olib tashlamowchimisiz "+a+" (h/y) ? ")
if xc=='h':
cy=input(" Koproq akaunt qoshmoqchimisiz "+a+" (h/y) ? ")
if cy=='h':
ospath()
else:
pass
else:
cv=input(" Oxirgi amalni olib tashlamoqchimisiz "+a+" (h/ny) ? ")
if cv=='h':
with open('multi_log.txt', 'r') as f:
data = f.readlines()
v=int((len(data))/2)
con=input(r+" Oxirgi amal bn bogliq malumotlarni delete qilishga rozimisiz "+a+" (h/y) ? ")
if con in ['', 'y']:
print(m+" chiqilmoqda..."+'\n'+a+"hech qaysi fayl delete qilinmado ! ")
sys.exit(1)
elif con=='y':
print(r+ " Endi ohirgi amalga bogliq bolgan fayllar delete qilinmoqda..")
Sleep(1)
for d in range(v-1):
os.remove("DarknetHack{}.session".format(d))
os.remove('multi_log.txt')
ospath()
else:
sys.exit()
else:
ospath()
clear()
info()
x=0
inh=2
t=0
with open('multi_log.txt', 'r') as f:
data = f.readlines()
v=int(len(data)/2)
for s in range(v):
api_id = data[t]
api_hash = data[t+1]
print(a+ ' \nAkauntga ulanish kutilmoqda.. {} \n'.format(x+1)+y+ ' \n api {}= '.format(x+1) +m+ api_id +'\n' +y+ ' api hash {} = '.format(x+1) +m+ api_hash)
Sleep(1)
client = TelegramClient("DarknetHack{}".format(x), api_id, api_hash)
client.start()
name=utils.get_display_name(client.get_me())
print(a+" \n\n Akauntga ulandi {}\n\n".format(name))
t+=2
lines=[]
chats = []
last_date = None
chunk_size = 200
groups=[]
result = client(GetDialogsRequest(
offset_date=last_date,
offset_id=0,
offset_peer=InputPeerEmpty(),
limit=chunk_size,
hash = 0
))
chats.extend(result.chats)
for chat in chats:
try:
if chat.megagroup==True:
groups.append(chat)
except:
continue
print(b+' Qaysi grupadan odam olmoqchisiz:')
i=0
for g in groups:
print(m+str(i) +y+ ' - '+a + g.title)
i+=1
g_index = input(b+' Raqamni tanlang (Enter bosib otqazib yuborish): ')
if g_index == '' :
info()
print(m+" Yahshi. Otqazib yuborildi...")
Sleep(1)
else:
info()
target_group=groups[int(g_index)]
print(y+' Azolwr uhlatilmoqda...')
all_participants = []
all_participants = client.get_participants(target_group)
print(y+' Hotiraga saqlanmoqda...')
with open("Members.csv","w",encoding='UTF-8') as f:
writer=csv.writer(f,delimiter=",",lineterminator="\n")
for user in all_participants:
if user.username:
username= user.username
else:
username= ""
if user.first_name:
first_name= user.first_name
else:
first_name= ""
if user.last_name:
last_name= user.last_name
else:
last_name= ""
name= (first_name + ' ' + last_name).strip()
writer.writerow([username,user.id,user.access_hash,name,target_group.title, target_group.id])
print(a+' Azolar muvafaqiyatli kochirildi.')
Sleep(1)
info()
print(b+'Qayso guruhga kochirmoqchisiz:')
i=0
for group in groups:
print(m+str(i) +y+ ' - ' +a+ group.title)
i+=1
g_index = input(b+' Raqamni kiriting: ')
if g_index=='':
print(m+" \n U 've Enter bosing va chiqing...")
sys.exit()
users = []
with open('Members.csv', encoding='UTF-8') as f:
rows = csv.reader(f,delimiter=",",lineterminator="\n")
for row in rows:
lines.append(row)
user = {}
user['username'] = row[0]
user['id'] = int(row[1])
user['name'] = row[3]
users.append(user)
my_participants = client.get_participants(groups[int(g_index)])
target_group=groups[int(g_index)]
target_group_entity = InputPeerChannel(target_group.id,target_group.access_hash)
my_participants_id = []
for my_participant in my_participants:
my_participants_id.append(my_participant.id)
info()
n,q=0,0
for user in users:
usR=str(user['id'])
n += 1
if n % 20 == 0:
info()
print (y+' waiting for 10 seconds to avoid flooding....')
Sleep(10)
elif q>= 9:
client.disconnect()
if x<v:
x+=1
inh+=1
break
else:
print(b+" Boshqa odamlar topilmadi...")
Sleep(1)
sys.exit()
if user['id'] in my_participants_id:
print(a+' Bu odan allaqachon bor ekan...')
n-=1
with open('Members.csv', 'r',encoding='UTF-8') as f:
dat = csv.reader(f,delimiter=",",lineterminator="\n")
for tad in dat:
if usR in tad:
lines.remove(tad)
break
Sleep(1)
continue
else:
try:
print (a+' Qoshish {}'.format(user['name']))
if True :
if user['username'] == "":
continue
user_to_add = client.get_input_entity(user['username'])
client(InviteToChannelRequest(target_group_entity,[user_to_add]))
print(m+" 2-4 sekund kuting...")
with open('Members.csv', 'r',encoding='UTF-8') as f:
dat = csv.reader(f,delimiter=",",lineterminator="\n")
for tad in dat:
if usR in tad:
lines.remove(tad)
break
with open("Members.csv","w",encoding='UTF-8') as f:
writer=csv.writer(f,delimiter=",",lineterminator="\n")
for line in lines:
writer.writerow(line)
time.sleep(random.randrange(2,4))
q=0
except PeerFloodError:
print(r+' Afsusku Spamm ekansiz spammdan chiqishiz bilan qayta harakat qib koring yoki boshqa akaunt kiriting.')
Sleep(1)
q+= 1
except UserPrivacyRestrictedError:
print(r+' Bu user\'s Nastroykasidan Maxfiylikni yoqqan ekan qosha olmadik.')
with open('Members.csv', 'r',encoding='UTF-8') as f:
dat = csv.reader(f,delimiter=",",lineterminator="\n")
for tad in dat:
if usR in tad:
lines.remove(tad)
break
Sleep(1)
except UserBotError:
print(r+' Botlarni\'t Otqazib yuborish...')
with open('Members.csv', 'r',encoding='UTF-8') as f:
dat = csv.reader(f,delimiter=",",lineterminator="\n")
for tad in dat:
if usR in tad:
lines.remove(tad)
break
except InputUserDeactivatedError:
print(r+' Belgilangan foydalanuvchi ochirildi. Otqazib yuborilmoqda...')
with open('Members.csv', 'r',encoding='UTF-8') as f:
dat = csv.reader(f,delimiter=",",lineterminator="\n")
for tad in dat:
if usR in tad:
lines.remove(tad)
break
Sleep(1)
except UserChannelsTooMuchError:
print(r+' Foydalanuvchi juda kop kanallar va guruhlarga wushilhan afsusku uni bu grupaga qushomemiz.')
with open('Members.csv', 'r',encoding='UTF-8') as f:
dat = csv.reader(f,delimiter=",",lineterminator="\n")
for tad in dat:
if usR in tad:
lines.remove(tad)
break
Sleep(1)
except UserNotMutualContactError:
print(r+' ozaro raqam otqazib yuborildi.')
with open('Members.csv', 'r',encoding='UTF-8') as f:
dat = csv.reader(f,delimiter=",",lineterminator="\n")
for tad in dat:
if usR in tad:
lines.remove(tad)
break
Sleep(1)
except KeyboardInterrupt:
i=0
kst=["stop","continue","switch to next account"]
for ks in kst:
print('\n'+m+ str(i) +y+ ' - ' +a+ ks)
i+=1
keyb=int(input(y+" Raqam kiriting : "))
if keyb==1:
print(a+" yahshi daom etamiz...")
Sleep(1)
elif keyb==0:
print(y+" chiqilmoqda...")
sys.exit(1)
else:
print(a+ " \n\nkeyingi akauntga otish kutulmoqda...\n\n")
x+=1
break
except Exception as e:
print(r+' Hatolik:', e)
print('Davom etilmoqda...')
q += 1
Sleep(1)
continue
| xiroshigo/TgMemberAdd | TgMemberAdder.py | TgMemberAdder.py | py | 12,767 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.system",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 63,
... |
5592598850 | from rest_framework import serializers
from api.models import OauthRecord
class OauthRecordSerializer(serializers.ModelSerializer):
class Meta:
model = OauthRecord
exclude = ('id', 'user')
def create(self, data):
instance = OauthRecord.objects.create(
user=data['user'],
provider=data['provider'],
uid=data['uid']
)
return instance
| johnny-butter/eLibrary | api/serializers/oauth_record.py | oauth_record.py | py | 421 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "api.models.OauthRecord",
"line_number": 8,
"usage_type": "name"
... |
21794665224 | from selenium import webdriver
from selenium.webdriver.firefox.service import Service
from webdriver_manager.firefox import GeckoDriverManager
import requests
driver = webdriver.Firefox(service=Service(GeckoDriverManager().install()))
driver.get("https://en.wikipedia.org/wiki/SpaceX")
wikipedia_xpath="/html/body/div[3]/div[3]/div[5]/div[1]/table[1]/tbody/tr[9]/td/div/ul/li[1]/a[1]"
wikipedia_spacexCEO=driver.find_element("xpath",wikipedia_xpath).text
driver.quit()
query="""query{
company {
ceo
}
}
"""
response=requests.post("https://api.spacex.land/graphql/", json={'query':query})
print(response.status_code)
api_spacexCEO=response.json()
print("CEO SpaceX menurut Wikipedia",wikipedia_spacexCEO)
print('CEO SpaceX menurut api.spacex.land/graphql',api_spacexCEO)
print("Apakah CEO SpaceX pada wikipedia sama dengan api.spacex.land ?")
print(wikipedia_spacexCEO==api_spacexCEO["data"]["company"]["ceo"])
| RianNugroho/DSTakeHome | main.py | main.py | py | 923 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.firefox.service.Service",
"line_number": 6,
"usage_type": "call"
},
{
... |
73004553633 | from .backend import backend as bd
import numpy as np
import os
import time
import fdtd_1d as f
import matplotlib.pyplot as plt
from .constants import c0, BLUE, CYAN, TEAL, ORANGE, RED, MAGENTA, GREY
from werkzeug.utils import cached_property
from multiprocessing import Pool
color_spec = [BLUE, CYAN, TEAL, ORANGE, RED, MAGENTA, GREY]
# Implementation of a dielectric slab
def theory_dielectric_slab_complex(grid):
d = ((grid.materials[0].position[-1] - grid.materials[0].position[0] + 1)) * grid.dx
omega = grid.sources[0].omega
n_real = np.sqrt((np.abs(grid.materials[0].epsilon_complex(omega)) + grid.materials[0].epsilon_real(omega)) / 2)
kappa = np.sqrt((np.abs(grid.materials[0].epsilon_complex(omega)) - grid.materials[0].epsilon_real(omega)) / 2)
n = n_real + 1j*kappa
k0 = grid.sources[0].omega / c0
k = n*k0
q = ((n - 1) ** 2) / ((n + 1) ** 2) * np.exp(2j * k * d)
e_inc = grid.sources[0].ampl
e_tr = e_inc * (2 / (n + 1)) * (2 * n / (n + 1)) * (1 / (1 - q)) * np.exp(1j * (k - k0) * d)
theo_amplitude = np.abs(e_tr)
theo_phasenunterschied = np.angle(e_tr)
return theo_amplitude, theo_phasenunterschied
def theory_dielectric_slab(grid):
d = ((grid.materials[0].position[-1] - grid.materials[0].position[0] + 1)) * grid.dx
n = np.sqrt(grid.eps[grid.materials[0].position[1]])
k0 = grid.sources[0].omega / c0
k = n * k0
q = ((n - 1) ** 2) / ((n + 1) ** 2) * np.exp(2j * k * d)
e_inc = grid.sources[0].ampl
e_tr = e_inc * (2 / (n + 1)) * (2 * n / (n + 1)) * (1 / (1 - q)) * np.exp(1j * (k - k0) * d)
theo_amplitude = np.abs(e_tr)
theo_phasenunterschied = np.angle(e_tr)
return theo_amplitude, theo_phasenunterschied
class benchmark:
'''Parent class for all benchmarks. Declares path/directory to store data'''
def __init__(self, name, benchmark_type):
self.name = name
self.benchmark_type = benchmark_type
self.dir_path = None
self.grids = []
def allocate_directory(self):
self.dir_path = os.path.join(os.path.dirname(__file__), 'saved_data/'+self.benchmark_type+'/'+self.name)
os.mkdir(path=self.dir_path)
def store_obs_data(self):
self.allocate_directory()
for grid in self.grids:
grid.store_obs_data(benchmark=True, benchmark_name=self.name)
class Harmonic_Slab_Lorentz_Setup(benchmark):
def __init__(self, name, dx, length_grid_in_dx, length_media_in_dx, start_index_media, wavelength, ampl, conductivity, eps_inf, gamma, w0, chi_1, chi_2, chi_3, timesteps, courant=1):
super().__init__(name=name, benchmark_type='harmonic_lorentz_slab')
self.start_media = start_index_media
self.dx = dx
self.indices = []
self.grids = []
for grid in range(len(self.dx)):
self.indices.append([start_index_media + 2 + i for i in np.arange(0, length_media_in_dx[grid] - 1)])
self.grids.append('grid_'+str(grid))
self.eps_inf = eps_inf
self.Nx = length_grid_in_dx
self.length_media = np.array(length_media_in_dx)
self.lamb = wavelength
self.timesteps = timesteps
self.courant = courant
self.N_lambda = np.zeros(len(self.dx)) #[[] for _ in range(len(self.dx))]
self.ampl = ampl
self.conductivity = conductivity
self.gamma = gamma
self.w0 = w0
self.chi_1 = chi_1
self.chi_2 = chi_2
self.chi_3 = chi_3
self.eps_real = None
self.eps_imag = None
self.eps_complex = None
self.n_real = None
self.wo_phase_merged = np.zeros(len(self.dx))
self.theo_phasenunterschied_merged = np.zeros(shape=(len(self.dx), np.max(self.length_media)-1))
self.theo_amplitude_merged = np.zeros(shape=(len(self.dx), np.max(self.length_media)-1))
self.exp_phase_merged = np.zeros(shape=(len(self.dx), np.max(self.length_media)-1))
self.exp_amplitude_merged = np.zeros(shape=(len(self.dx), np.max(self.length_media)-1))
def _grid_wo_slab(self):
position_src = self.start_media - 1
for grid in range(len(self.dx)):
position_obs = self.Nx[grid] - 3
end_mur = self.Nx[grid] - 1
wo_grid = f.Grid(self.Nx[grid], dx=self.dx[grid], courant=self.courant)
if wo_grid.courant == 1:
wo_grid[position_src] = f.ActivatedSinus(name='SinsquaredActivated', wavelength=self.lamb,
carrier_wavelength=(self.lamb * 30), phase_shift=0,
amplitude=self.ampl, tfsf=True)
else:
wo_grid[position_src] = f.ActivatedSinus(name='SinsquaredActivated', wavelength=self.lamb,
carrier_wavelength=(self.lamb * 30), phase_shift=0,
amplitude=self.ampl, tfsf=False)
wo_grid[position_obs] = f.QuasiHarmonicObserver(name='firstobserver', first_timestep=self.timesteps[grid]-200)
if wo_grid.courant == 0.5:
wo_grid[0] = f.LeftSideGridBoundary()
wo_grid[end_mur] = f.RightSideGridBoundary()
else:
wo_grid[0] = f.LeftSideMur()
wo_grid[end_mur] = f.RightSideMur()
wo_grid.run_timesteps(self.timesteps[grid], vis=False)
self.wo_phase_merged[grid] = wo_grid.local_observers[0].phase
def _grids_w_slab(self):
position_src = self.start_media - 1
for grid in range(len(self.dx)):
position_obs = self.Nx[grid] - 3
end_mur = self.Nx[grid] - 1
for ind_media, ind_array in zip(self.indices[grid], range(self.length_media[grid])):
# Step 1: init grid
w_grid = 'slab' + str(ind_media)
w_grid = f.Grid(nx=self.Nx[grid], dx=self.dx[grid], courant=self.courant)
# Step 2: init media
w_grid[self.start_media:ind_media] = f.LorentzMedium(name='media', permeability=1, eps_inf=self.eps_inf, conductivity=self.conductivity, gamma=self.gamma, chi_1=self.chi_1, chi_2=self.chi_2, chi_3=self.chi_3, w0=self.w0)
# Step 3: init source
if w_grid.courant == 1:
w_grid[position_src] = f.ActivatedSinus(name='SinsquaredActivated', wavelength=self.lamb,
carrier_wavelength=(self.lamb * 30), phase_shift=0,
amplitude=self.ampl, tfsf=True)
else:
w_grid[position_src] = f.ActivatedSinus(name='SinsquaredActivated', wavelength=self.lamb,
carrier_wavelength=(self.lamb * 30), phase_shift=0,
amplitude=self.ampl, tfsf=False)
# Step 4: add observer
w_grid[position_obs] = f.QuasiHarmonicObserver(name='firstobserver', first_timestep=self.timesteps[grid]-200)
# Step 5: add boundaries
if w_grid.courant == 0.5:
w_grid[0] = f.LeftSideGridBoundary()
w_grid[end_mur] = f.RightSideGridBoundary()
else:
w_grid[0] = f.LeftSideMur()
w_grid[end_mur] = f.RightSideMur()
# Step 6: run simulation
w_grid.run_timesteps(timesteps=self.timesteps[grid], vis=False)
# Step 7: misc
self.exp_amplitude_merged[grid][ind_array] = w_grid.local_observers[0].amplitude
self.exp_phase_merged[grid][ind_array] = w_grid.local_observers[0].phase
self.theo_amplitude_merged[grid][ind_array] = theory_dielectric_slab_complex(w_grid)[0]
self.theo_phasenunterschied_merged[grid][ind_array] = theory_dielectric_slab_complex(w_grid)[1]
# if list self.eps_real is empty:
if self.eps_real is None:
self.eps_real = w_grid.materials[0].epsilon_real(w_grid.sources[0].omega)
self.eps_imag = w_grid.materials[0].epsilon_imag(w_grid.sources[0].omega)
self.eps_complex = w_grid.materials[0].epsilon_complex(w_grid.sources[0].omega)
self.n_real = np.sqrt((np.abs(self.eps_complex) + self.eps_real)/2)
def _visualize(self):
max_resolution_grid = int(np.argmax(self.N_lambda))
fig, axes = plt.subplots(2, 2)
axes[0][0].grid(True, linestyle=(0, (1, 5)), color=GREY, linewidth=1)
axes[0][1].grid(True, linestyle=(0, (1, 5)), color=GREY, linewidth=1)
axes[1][0].grid(True, linestyle=(0, (1, 5)), color=GREY, linewidth=1)
axes[1][1].grid(True, linestyle=(0, (1, 5)), color=GREY, linewidth=1)
axes[0][0].set_xlabel('width in m', fontsize=14)
axes[0][1].set_xlabel('width in m', fontsize=14)
axes[1][0].set_xlabel('width in m', fontsize=14)
axes[1][1].set_xlabel('width in m', fontsize=14)
axes[0][0].set_ylabel('transmitted amplitude ' + r'$E_{z,tr}$', fontsize=14)
axes[1][0].set_ylabel('Phasenunterschied', fontsize=14)
axes[1][1].set_ylabel(r'$d(\phi_{exp},\phi_{theo})$', fontsize=14)
axes[0][1].set_ylabel(r'$E_{tr,theo}$ / $E_{tr,FDTD}$', fontsize=14)
axes[0][0].plot(np.array((np.array(self.indices[max_resolution_grid])) - self.start_media) * self.dx[max_resolution_grid],
self.theo_amplitude_merged[max_resolution_grid][0:self.length_media[max_resolution_grid] - 1], label='Theorie', color=ORANGE)
axes[1][0].plot(np.array((np.array(self.indices[max_resolution_grid])) - self.start_media) * self.dx[max_resolution_grid],
self.theo_phasenunterschied_merged[max_resolution_grid][0:self.length_media[max_resolution_grid] - 1], color=ORANGE,
label='Theorie')
for grid in range(len(self.dx)):
axes[0][0].plot(np.array((np.array(self.indices[grid])) - self.start_media) * self.dx[grid], self.exp_amplitude_merged[grid][0:self.length_media[grid] - 1], color=color_spec[grid], linestyle='dashed', label=r'$N_{\lambda}=$' + '{0:.3}'.format(self.N_lambda[grid]))
axes[0][1].plot(np.array((np.array(self.indices[grid])) - self.start_media) * self.dx[grid], self.theo_amplitude_merged[grid][0:self.length_media[grid] - 1]/self.exp_amplitude_merged[grid][0:self.length_media[grid] - 1], color=color_spec[grid], linestyle='dashed', label=r'$N_{\lambda}=$' + '{0:.3}'.format(self.N_lambda[grid]))
axes[1][0].plot(np.array((np.array(self.indices[grid])) - self.start_media) * self.dx[grid], self.get_exp_phasedifference[grid][0:self.length_media[grid] - 1], color= color_spec[grid], linestyle='dashed', label=r'$N_{\lambda}=$' + '{0:.3}'.format(self.N_lambda[grid]))
axes[1][1].plot(np.array((np.array(self.indices[grid])) - self.start_media) * self.dx[grid], np.abs(self.get_exp_phasedifference[grid][0:self.length_media[grid] - 1] - self.theo_phasenunterschied_merged[grid][0:self.length_media[grid] - 1]), color=color_spec[grid], label=r'$N_{\lambda}=$' + '{0:.3}'.format(self.N_lambda[grid]))
axes[0][0].legend(loc='best')
axes[0][1].legend(loc='best')
axes[1][0].legend(loc='best')
axes[1][1].legend(loc='best')
plt.show()
@cached_property
def get_exp_phasedifference(self):
phase_diff = np.zeros(shape=(len(self.dx), np.max(self.length_media)-1))
for grid in range(len(self.dx)):
phase_diff[grid][0:self.length_media[grid] - 1] = -np.array(self.exp_phase_merged[grid][0:self.length_media[grid] - 1]) + self.wo_phase_merged[grid]
mask = (phase_diff[grid][:]) > np.pi
phase_diff[grid][mask] -= 2*np.pi
return phase_diff
def _set_N_lambda(self):
for grid in range(len(self.dx)):
self.N_lambda[grid] = self.lamb/(self.dx[grid]*self.n_real)
def store_obs_data(self):
self.allocate_directory()
file_grid_informations = os.path.join(self.dir_path, 'info.npy')
file_width_in_dx = os.path.join(self.dir_path, 'width.npy')
file_theory_ampl = os.path.join(self.dir_path, 'theory_ampl.npy')
file_theory_phase = os.path.join(self.dir_path, 'theory_phase.npy')
file_exp_ampl = os.path.join(self.dir_path, 'exp_ampl.npy')
file_exp_phase = os.path.join(self.dir_path, 'exp_phase.npy')
grid_informations = np.zeros(shape=(len(self.grids), 4))
width_in_dx = np.zeros(shape=(len(self.grids), np.max(self.length_media)))
for grid in range(len(self.grids)):
width_in_dx[grid][0:self.length_media[grid] - 1] = np.array(self.indices[grid]) - self.start_media
grid_informations[grid][0] = self.dx[grid]
grid_informations[grid][1] = self.timesteps[grid]
grid_informations[grid][2] = self.N_lambda[grid]
grid_informations[grid][3] = self.length_media[grid]
np.save(file_grid_informations, arr=grid_informations)
np.save(file_width_in_dx, arr=width_in_dx)
np.save(file_theory_ampl, arr=self.theo_amplitude_merged)
np.save(file_theory_phase, arr=self.theo_phasenunterschied_merged)
np.save(file_exp_ampl, arr=self.exp_amplitude_merged)
np.save(file_exp_phase, arr=self.get_exp_phasedifference)
def run_benchmark(self):
start_time = time.time()
self._grid_wo_slab()
self._grids_w_slab()
self._set_N_lambda()
print("computed in --- %s seconds ---" % (time.time() - start_time))
self._visualize()
class QPM_Length_multiple_chi_2(benchmark):
def __init__(self, number_of_lambdas, timesteps, name, peak_timestep, pulse_duration, number_of_distributed_observer, chi2, multi=True):
super().__init__(name=name, benchmark_type='qpm_harmonic_length_multiple')
self.no_of_lambdas = number_of_lambdas
self.half_qpm = 737
self.chi_2 = chi2
self.multi = multi
self.dx = 4e-09
self.dt = 0.5*self.dx/c0
self.timesteps = timesteps
self.nx = self.no_of_lambdas * (2*self.half_qpm + 1) + 10 # 10 is just a buffer to place boundarys and src
self.start_media = 5
self.ending_indices = np.array([self.start_media + i*self.half_qpm for i in range(self.no_of_lambdas*2 + 1)])
self.peak_timestep = peak_timestep
self.pulse_duration = pulse_duration
self.no_observer = number_of_distributed_observer
self.obs_distance = (self.nx - 10)//self.no_observer
self.obs_positions = np.array([5 + i*self.obs_distance for i in range(self.no_observer)])
def _allocate_memory(self):
self.grid_information = np.array([self.dt, self.dx, self.no_of_lambdas, self.peak_timestep, self.pulse_duration])
self.relative_observer_pos = self.obs_positions - self.start_media
def store_obs_data(self):
start_time = time.time()
self.allocate_directory()
self._allocate_memory()
file_grid_info = os.path.join(self.dir_path, 'info.npy')
file_relative_pos = os.path.join(self.dir_path, 'relative_ind.npy')
file_data = os.path.join(self.dir_path, 'E_data.npy')
file_chi_info = os.path.join(self.dir_path, 'chi2.npy')
np.save(file_grid_info, arr=self.grid_information)
np.save(file_relative_pos, arr=self.relative_observer_pos)
np.save(file_data, arr=self.observed_data)
np.save(file_chi_info, arr=self.chi_2)
print("saved in --- %s seconds ---" % (time.time() - start_time))
def _create_grids(self, chi2):
print('process initiated')
qpm_grid = f.Grid(nx=self.nx, dx=self.dx, benchmark='qpm_harmonic_length_multiple', courant=0.5)
for indices in range(len(self.ending_indices) - 1):
if indices % 2 == 0:
qpm_grid[self.ending_indices[indices]:self.ending_indices[indices + 1]] = f.LorentzMedium(
name='Varin', permeability=1, eps_inf=1.0, chi_1=[2.42, 9.65, 1.46], chi_2=[30.e-12, 0, 0],
chi_3=[0, 0, 0], conductivity=0, w0=[1.5494e16, 9.776e13, 7.9514e15], gamma=[0, 0, 0])
else:
qpm_grid[self.ending_indices[indices]:self.ending_indices[indices + 1]] = f.LorentzMedium(
name='Varin', permeability=1, eps_inf=1.0, chi_1=[2.42, 9.65, 1.46], chi_2=[chi2, 0, 0],
chi_3=[0, 0, 0], conductivity=0, w0=[1.5494e16, 9.776e13, 7.9514e15], gamma=[0, 0, 0])
qpm_grid[3] = f.GaussianImpulseWithFrequency(name='Varin', Intensity=5*10e12, wavelength=1.064e-06, pulse_duration=self.pulse_duration, peak_timestep=self.peak_timestep, tfsf=False)
for pos in self.obs_positions:
qpm_grid[int(pos)] = f.E_FFTObserver(name='Varin', first_timestep=0, second_timestep=self.timesteps - 1)
qpm_grid[0] = f.LeftSideGridBoundary()
qpm_grid[self.nx - 1] = f.RightSideGridBoundary()
# step 6: run simulation
qpm_grid.run_timesteps(timesteps=self.timesteps, vis=False)
observed_grid_data = bd.zeros(shape=(self.no_observer, self.timesteps))
for obs in range(self.no_observer):
observed_grid_data[obs] = qpm_grid.local_observers[obs].observed_E
return observed_grid_data
def run_benchmark(self):
start_time = time.time()
processpool = Pool()
self.observed_data = bd.zeros(shape=(len(self.chi_2), self.no_observer, self.timesteps))
if self.multi:
self.observed_data = bd.stack(processpool.map(self._create_grids, self.chi_2))
else:
for ind_chi, chi in zip(range(len(self.chi_2)), self.chi_2):
self.observed_data[ind_chi] = self._create_grids(self.chi_2[ind_chi])
print("computed in --- %s seconds ---" % (time.time() - start_time))
class QPM_Length(benchmark):
def __init__(self, number_of_lambdas, timesteps, name, peak_timestep, pulse_duration, number_of_distributed_observer):
super().__init__(name=name, benchmark_type='qpm_harmonic_length')
self.no_of_lambdas = number_of_lambdas
self.half_qpm = 737
self.dx = 4e-09
self.timesteps = timesteps
self.nx = self.no_of_lambdas * (2*self.half_qpm + 1) + 10 # 10 is just a buffer to place boundarys and src
self.start_media = 5
self.ending_indices = np.array([self.start_media + i*self.half_qpm for i in range(self.no_of_lambdas*2 + 1)])
self.peak_timestep = peak_timestep
self.pulse_duration = pulse_duration
self.no_observer = number_of_distributed_observer
self.obs_distance = (self.nx - 10)//self.no_observer
self.obs_positions = np.array([5 + i*self.obs_distance for i in range(self.no_observer)])
def _create_grid(self):
qpm_grid = f.Grid(nx=self.nx, dx=self.dx, benchmark='qpm_harmonic_length', courant=0.5)
self.grids.append(qpm_grid)
for indices in range(len(self.ending_indices) - 1):
if indices % 2 == 0:
qpm_grid[self.ending_indices[indices]:self.ending_indices[indices + 1]] = f.LorentzMedium(
name='Varin', permeability=1, eps_inf=1.0, chi_1=[2.42, 9.65, 1.46], chi_2=[30.e-12, 0, 0],
chi_3=[0, 0, 0], conductivity=0, w0=[1.5494e16, 9.776e13, 7.9514e15], gamma=[0, 0, 0])
else:
qpm_grid[self.ending_indices[indices]:self.ending_indices[indices + 1]] = f.LorentzMedium(
name='Varin', permeability=1, eps_inf=1.0, chi_1=[2.42, 9.65, 1.46], chi_2=[-30.e-12, 0, 0],
chi_3=[0, 0, 0], conductivity=0, w0=[1.5494e16, 9.776e13, 7.9514e15], gamma=[0, 0, 0])
qpm_grid[3] = f.GaussianImpulseWithFrequency(name='Varin', Intensity=5*10e12, wavelength=1.064e-06, pulse_duration=self.pulse_duration, peak_timestep=self.peak_timestep, tfsf=False)
for pos in self.obs_positions:
qpm_grid[int(pos)] = f.E_FFTObserver(name='Varin', first_timestep=0, second_timestep=self.timesteps - 1)
qpm_grid[0] = f.LeftSideGridBoundary()
qpm_grid[self.nx - 1] = f.RightSideGridBoundary()
#qpm_grid[0] = f.LeftSideMur()
#qpm_grid[self.nx - 1] = f.RightSideMur()
# step 6: run simulation
qpm_grid.run_timesteps(timesteps=self.timesteps, vis=False)
def _create_grid_mono(self):
qpm_grid_mono = f.Grid(nx=self.nx, dx=self.dx, benchmark='qpm_harmonic_length', courant=0.5)
self.grids.append(qpm_grid_mono)
for indices in range(len(self.ending_indices) - 1):
if indices % 2 == 0:
qpm_grid_mono[self.ending_indices[indices]:self.ending_indices[indices + 1]] = f.LorentzMedium(
name='Varin', permeability=1, eps_inf=1.0, chi_1=[2.42, 9.65, 1.46], chi_2=[30.e-12, 0, 0],
chi_3=[0, 0, 0], conductivity=0, w0=[1.5494e16, 9.776e13, 7.9514e15], gamma=[0, 0, 0])
else:
qpm_grid_mono[self.ending_indices[indices]:self.ending_indices[indices + 1]] = f.LorentzMedium(
name='Varin', permeability=1, eps_inf=1.0, chi_1=[2.42, 9.65, 1.46], chi_2=[30.e-12, 0, 0],
chi_3=[0, 0, 0], conductivity=0, w0=[1.5494e16, 9.776e13, 7.9514e15], gamma=[0, 0, 0])
qpm_grid_mono[3] = f.GaussianImpulseWithFrequency(name='Varin', Intensity=5*10e12, wavelength=1.064e-06, pulse_duration=self.pulse_duration, peak_timestep=self.peak_timestep, tfsf=False)
for pos in self.obs_positions:
qpm_grid_mono[int(pos)] = f.E_FFTObserver(name='Varin', first_timestep=0, second_timestep=self.timesteps - 1)
qpm_grid_mono[0] = f.LeftSideGridBoundary()
qpm_grid_mono[self.nx - 1] = f.RightSideGridBoundary()
#qpm_grid[0] = f.LeftSideMur()
#qpm_grid[self.nx - 1] = f.RightSideMur()
# step 6: run simulation
qpm_grid_mono.run_timesteps(timesteps=self.timesteps, vis=False)
def _allocate_memory(self):
self.observed_data = np.zeros(shape=(self.no_observer, self.timesteps))
self.observed_data_mono = np.zeros(shape=(self.no_observer, self.timesteps))
observer_object_list = np.array(self.grids[0].local_observers)
observer_object_list_mono = np.array(self.grids[1].local_observers)
for obs_ind in range(self.no_observer):
self.observed_data[obs_ind][:] = observer_object_list[obs_ind].observed_E[:]
self.observed_data_mono[obs_ind][:] = observer_object_list_mono[obs_ind].observed_E[:]
self.grid_information = np.array([self.grids[0].dt, self.grids[0].dx, self.no_of_lambdas, self.peak_timestep, self.pulse_duration])
self.relative_observer_pos = self.obs_positions - self.start_media
def store_obs_data(self):
start_time = time.time()
self.allocate_directory()
self._allocate_memory()
file_grid_info = os.path.join(self.dir_path, 'info.npy')
file_relative_pos = os.path.join(self.dir_path, 'relative_ind.npy')
file_data = os.path.join(self.dir_path, 'E_data.npy')
file_data_mono = os.path.join(self.dir_path, 'E_data_mono.npy')
np.save(file_grid_info, arr=self.grid_information)
np.save(file_relative_pos, arr=self.relative_observer_pos)
np.save(file_data, arr=self.observed_data)
np.save(file_data_mono, arr=self.observed_data_mono)
print("saved in --- %s seconds ---" % (time.time() - start_time))
def run_benchmark(self):
start_time = time.time()
self._create_grid()
self._create_grid_mono()
print("computed in --- %s seconds ---" % (time.time() - start_time))
class QPM_end_P(benchmark):
''' reproduces paper QuasiPhaseMatching from Varin's Paper '''
def __init__(self, number_of_lambdas, timesteps, name, peak_timestep, pulse_duration):
super().__init__(name=name, benchmark_type='qpm_harmonic')
self.no_of_lambdas = number_of_lambdas
# Varin parameters
self.half_qpm = 737
self.dx = 4e-09
self.timesteps = timesteps
self.nx = number_of_lambdas * (2*self.half_qpm + 1) + 10
self.start_media = 5
self.ending_indices = np.array([self.start_media + i*self.half_qpm for i in range(self.no_of_lambdas*2 + 1)])
self.peak_timestep = peak_timestep
self.pulse_duration = pulse_duration
def _create_grid(self):
self.position_P_obs = self.nx - 9
self.position_E_obs = self.nx - 8
# step 1: init grid
qpm_grid = f.Grid(nx=self.nx, dx=self.dx, benchmark='qpm_harmonic', courant=0.5)
self.grids.append(qpm_grid)
for indices in range(len(self.ending_indices) - 1):
if indices % 2 == 0:
qpm_grid[self.ending_indices[indices]:self.ending_indices[indices + 1]] = f.LorentzMedium(
name='Varin', permeability=1, eps_inf=1.0, chi_1=[2.42, 9.65, 1.46], chi_2=[30.e-12, 0, 0],
chi_3=[0, 0, 0], conductivity=0, w0=[1.5494e16, 9.776e13, 7.9514e15], gamma=[0, 0, 0])
else:
qpm_grid[self.ending_indices[indices]:self.ending_indices[indices + 1]] = f.LorentzMedium(
name='Varin', permeability=1, eps_inf=1.0, chi_1=[2.42, 9.65, 1.46], chi_2=[-30.e-12, 0, 0],
chi_3=[0, 0, 0], conductivity=0, w0=[1.5494e16, 9.776e13, 7.9514e15], gamma=[0, 0, 0])
qpm_grid[3] = f.GaussianImpulseWithFrequency(name='Varin', Intensity=10e12, wavelength=1.064e-06,
pulse_duration=self.pulse_duration,
peak_timestep=self.peak_timestep, tfsf=True)
qpm_grid[self.position_E_obs] = f.E_FFTObserver(name='Varin', first_timestep=0, second_timestep=self.timesteps - 1)
qpm_grid[self.position_P_obs] = f.P_FFTObserver(name='Varin', first_timestep=0, second_timestep=self.timesteps - 1)
qpm_grid[0] = f.LeftSideGridBoundary()
qpm_grid[self.nx - 1] = f.RightSideGridBoundary()
#qpm_grid[0] = f.LeftSideMur()
#qpm_grid[self.nx - 1] = f.RightSideMur()
# step 6: run simulation
qpm_grid.run_timesteps(timesteps=self.timesteps, vis=True)
def _allocate_memory(self):
self.observed_data = np.zeros(shape=(2, self.timesteps))
self.observed_data[0][:] = self.grids[0].local_observers[0].observed_E[:]
self.observed_data[1][:] = self.grids[0].local_observers[1].observed_P[:]
self.grid_information = np.array([self.grids[0].dt, self.grids[0].dx, self.no_of_lambdas, self.peak_timestep,
self.pulse_duration])
def store_obs_data(self):
start_time = time.time()
self.allocate_directory()
self._allocate_memory()
self.relative_observer_pos = np.array([self.position_E_obs - self.start_media, self.position_P_obs - self.start_media])
file_grid_info = os.path.join(self.dir_path, 'info.npy')
file_relative_pos = os.path.join(self.dir_path, 'relative_ind.npy')
file_data = os.path.join(self.dir_path, 'E_P_data.npy')
np.save(file_grid_info, arr=self.grid_information)
np.save(file_relative_pos, arr=self.relative_observer_pos)
np.save(file_data, arr=self.observed_data)
print("saved in --- %s seconds ---" % (time.time() - start_time))
def run_benchmark(self):
start_time = time.time()
self._create_grid()
print("computed in --- %s seconds ---" % (time.time() - start_time))
class Soliton(benchmark):
''' tries to show the majestic combined effect of GVD and SPM '''
def __init__(self, name, central_wavelength, pulse_duration, intensities, x_to_snapshot, peak_timestep, frame_width_in_dx, dx, multi=True):
super().__init__(name=name, benchmark_type='soliton')
self.central_wavelength = central_wavelength
self.pulse_duration = pulse_duration
self.intensities = np.array(intensities)
self.x_to_snapshot = x_to_snapshot
self.peak_timestep = peak_timestep
self.name = name
self.dx = dx
self.nx = int(x_to_snapshot[-1]/self.dx) + 5020
self.frame_width = frame_width_in_dx
self.multi = multi
def _allocate_memory(self):
self.grid_information = np.array([self.peak_timestep, self.pulse_duration, self.dx, self.frame_width])
self.used_propagations = np.array(self.x_to_snapshot)
self.used_intensities = np.array(self.intensities)
def _create_grids(self, intensity):
soliton_grid = f.Grid(self.nx, dx=self.dx, courant=0.5) #12mm 550000
# Step 1: init media
soliton_grid[5:self.nx-10] = f.CentroRamanMedium(name='Varin', chi_1=[0.69617, 0.40794, 0.89748], w0=[2.7537e16, 1.6205e16, 1.9034e14], chi_3=[1.94e-22, 0, 0], alpha=[0.7, 0, 0], wr=[8.7722e13, 0, 0], gamma_K=[0, 0, 0], gamma_R=[3.1250e13, 0, 0], permeability=1, conductivity=0, eps_inf=1)
# Step 2: init src
soliton_grid[3] = f.SechEnveloped(name='Varin', wavelength=1.5e-06, pulse_duration=self.pulse_duration, Intensity=intensity, peak_timestep=self.peak_timestep, tfsf=False)
# Step 3: init frame
soliton_grid[5:(6+self.frame_width)] = f.MovingFrame(x_to_snapshot=self.x_to_snapshot, central_wavelength=self.central_wavelength)
# Step 4: init boundaries
soliton_grid[0] = f.LeftSideGridBoundary()
soliton_grid[self.nx - 1] = f.RightSideGridBoundary()
soliton_grid.local_observers[0]._allocate_memory()
timesteps = soliton_grid.local_observers[0].timesteps_to_store[-1] + 5000
# Step 5: start benchmark
soliton_grid.run_timesteps(timesteps, vis=False)
# due to multiprocessing it is more convenient to store data by process (nobody loves waiting)
observed_process_data = soliton_grid.local_observers[0].stored_data
return observed_process_data
def store_obs_data(self):
start_time = time.time()
self._allocate_memory()
self.allocate_directory()
file_grid_info = os.path.join(self.dir_path, 'info.npy')
file_observed_data = os.path.join(self.dir_path, 'Int_Pos_E.npy')
file_used_propagations = os.path.join(self.dir_path, 'propagations.npy')
file_used_intensities = os.path.join(self.dir_path, 'intensities.npy')
np.save(file_grid_info, arr=self.grid_information)
np.save(file_used_intensities, arr=self.used_intensities)
np.save(file_observed_data, arr=self.observed_data)
np.save(file_used_propagations, arr=self.used_propagations)
print("stored in --- %s seconds ---" % (time.time() - start_time))
def run_benchmark(self):
start_time = time.time()
if self.multi:
processpool = Pool()
self.observed_data = np.array(processpool.map(self._create_grids, self.intensities))
else:
self.observed_data = np.zeros(shape=(np.size(self.intensities), len(self.x_to_snapshot), self.frame_width + 1))
for intensity_index, intensity in zip(range(np.size(self.intensities)), self.intensities):
self.observed_data[intensity_index] = self._create_grids(intensity)
print("computed in --- %s seconds ---" % (time.time() - start_time))
class TiO2_Si02_Dielectric_Mirror_Setup:
def __init__(self, N_lambda_media, wavelength_guided_for, wavelength, ampl, timesteps, number_of_layer_pairs, vary_layers=False, vary_inc_wavelength=False):
self.N_lambda = N_lambda_media
self.layer_number = [0 + i for i in range(number_of_layer_pairs)]
self.number_of_layer = number_of_layer_pairs
self.ampl = ampl
self.timesteps = timesteps
self.lamb = wavelength
self.lamb_guided = wavelength_guided_for
self.vary_layers = vary_layers
self.vary_lambda = vary_inc_wavelength
self.ti_n = 2.519 # refractiveindex.info
self.si_n = 1.453 # refractiveindex.info
self.dx = self.lamb/(self.ti_n * self.N_lambda)
self.d_ti = int(self.lamb_guided / (self.ti_n * 4 * self.dx)) # huge problem
self.d_si = int(self.lamb_guided / (self.si_n * 4 * self.dx)) # huge problem
self.Nx = self.number_of_layer*(self.d_si + self.d_ti) + 14
self.starting_locations_ti = [8 + i*(self.d_ti + self.d_si) for i in self.layer_number]
self.starting_locations_si = np.array(self.starting_locations_ti) + self.d_ti
self.position_src = 6
self.position_obs = 3
self.incident_wavelengths = [self.lamb + (i / 101) * self.lamb_guided for i in np.arange(0, 101, 1)]
self.refl_ampl = []
self.theory_R = []
def _construct_non_vary_grid(self):
end_mur = self.Nx - 1
grid_non_vary = f.Grid(dx=self.dx, nx=self.Nx)
grid_non_vary[0] = f.LeftSideMur()
grid_non_vary[end_mur] = f.RightSideMur()
grid_non_vary[self.position_src] = f.ActivatedSinus(name='Laser', wavelength=self.lamb, carrier_wavelength=6000e-09, tfsf=True, amplitude=self.ampl, phase_shift=0)
grid_non_vary[self.position_obs] = f.QuasiHarmonicObserver(name='Observer', first_timestep=self.timesteps - 200)
for layer_pair in self.layer_number:
ti_start = self.starting_locations_ti[layer_pair]
si_start = self.starting_locations_si[layer_pair]
grid_non_vary[ti_start:si_start] = f.NonDispersiveMedia('TiO2', permittivity=self.ti_n**2, permeability=1, conductivity=0)
grid_non_vary[si_start:(si_start+self.d_si)] = f.NonDispersiveMedia('SiO2', permittivity=self.si_n**2, permeability=1, conductivity=0)
grid_non_vary.run_timesteps(self.timesteps)
self.refl_ampl.append(grid_non_vary.local_observers[0].amplitude)
def _construct_vary_grid(self):
for layer in self.layer_number:
Nx = (layer + 1) * (self.d_si + self.d_ti) + 12
end_mur = Nx - 1
grid_vary = f.Grid(dx=self.dx, nx=Nx)
grid_vary[end_mur] = f.RightSideMur()
grid_vary[0] = f.LeftSideMur()
grid_vary[self.position_obs] = f.QuasiHarmonicObserver(name='Observer', first_timestep=self.timesteps - 300)
grid_vary[self.position_src] = f.ActivatedSinus(name='Laser', wavelength=self.lamb, carrier_wavelength=6000e-09, tfsf=True, amplitude=self.ampl, phase_shift=0)
for layers in range(0, layer + 1):
ti_start = self.starting_locations_ti[layers]
si_start = self.starting_locations_si[layers]
grid_vary[ti_start:si_start] = f.NonDispersiveMedia('TiO2', permittivity=self.ti_n**2, permeability=1, conductivity=0)
grid_vary[si_start:(si_start + self.d_si)] = f.NonDispersiveMedia('SiO2', permittivity=self.si_n**2, permeability=1, conductivity=0)
grid_vary.run_timesteps(self.timesteps, vis=False)
self.refl_ampl.append(grid_vary.local_observers[0].amplitude)
self.theory_R.append(((self.si_n**(2*(layer+1))-self.ti_n**(2*(layer+1)))/(self.si_n**(2*(layer+1))+self.ti_n**(2*(layer+1))))**2)
def _construct_vary_wavelength_grid(self):
for wavelength in self.incident_wavelengths:
end_mur = self.Nx - 1
grid_vary_lamb = f.Grid(dx=self.dx, nx=self.Nx)
grid_vary_lamb[0] = f.LeftSideMur()
grid_vary_lamb[end_mur] = f.RightSideMur()
grid_vary_lamb[self.position_src] = f.ActivatedSinus('Laser', wavelength=wavelength, carrier_wavelength=6000e-09, tfsf=True, amplitude=self.ampl, phase_shift=0)
grid_vary_lamb[self.position_obs] = f.QuasiHarmonicObserver(name='Observer', first_timestep=self.timesteps - 200)
for layer_pair in self.layer_number:
ti_start = self.starting_locations_ti[layer_pair]
si_start = self.starting_locations_si[layer_pair]
grid_vary_lamb[ti_start:si_start] = f.NonDispersiveMedia('TiO2', permittivity=self.ti_n**2, permeability=1, conductivity=0)
grid_vary_lamb[si_start:(si_start + self.d_si)] = f.NonDispersiveMedia('SiO2', permittivity=self.si_n**2, permeability=1, conductivity=0)
grid_vary_lamb.run_timesteps(self.timesteps, vis=False)
self.refl_ampl.append(grid_vary_lamb.local_observers[0].amplitude)
def _visualize_vary_grid(self):
fig, axes = plt.subplots(1, 1)
axes.plot(np.array(self.layer_number)+1, np.array(self.refl_ampl)**2, linestyle='dashed', color='blue', marker='o', alpha=0.5, label='FDTD')
axes.plot(np.array(self.layer_number)+1, np.array(self.theory_R), linestyle='dashed', color='red', marker='o', alpha=0.5, label='model')
axes.legend(loc='best')
axes.grid(True, linestyle=(0, (1, 5)), color='black', linewidth=1)
axes.set_xlabel('Anzahl der Paare', fontsize=14)
axes.set_ylabel(r'Reflektionsgrad $\mathcal{R}$', fontsize=14)
plt.show()
def _visualize_vary_lamb(self):
fig, axes = plt.subplots(1, 1)
axes.plot(np.array(self.incident_wavelengths)*10**9, np.array(self.refl_ampl)**2, linestyle='dashed', color='blue', marker='o',
alpha=0.5, label='FDTD')
axes.grid(True, linestyle=(0, (1, 5)), color='black', linewidth=1)
axes.set_xlabel(r'$\lambda$ in nm', fontsize=14)
axes.set_ylabel(r'Reflektionsgrad $\mathcal{R}$', fontsize=14)
plt.show()
def run_benchmark(self):
if self.vary_layers:
self._construct_vary_grid()
self._visualize_vary_grid()
elif self.vary_lambda:
self._construct_vary_wavelength_grid()
self._visualize_vary_lamb()
else:
self._construct_non_vary_grid()
class Harmonic_Slab_Setup:
def __init__(self, dx, length_grid_in_dx, length_media_in_dx, start_index_media, wavelength, epsilon, ampl, timesteps):
self.indices = [start_index_media + 2 + i for i in np.arange(0, length_media_in_dx - 1)]
self.start_media = start_index_media
self.dx = dx
self.eps = epsilon
self.Nx = length_grid_in_dx
self.length_media = length_media_in_dx
self.lamb = wavelength
self.ampl = ampl
self.wo_phase = []
self.theo_phasenunterschied = []
self.theo_amplitude = []
self.exp_phase = []
self.exp_amplitude = []
self.timesteps = timesteps
def _grid_wo_slab(self):
position_src = self.start_media - 1
position_obs = self.Nx - 3
end_mur = self.Nx - 1
wo_grid = f.Grid(self.Nx, dx=self.dx)
wo_grid[position_src] = f.ActivatedSinus(name='SinsquaredActivated', wavelength=self.lamb, carrier_wavelength=10000.e-09, phase_shift=0, amplitude=self.ampl, tfsf=True)
wo_grid[position_obs] = f.QuasiHarmonicObserver(name='firstobserver', first_timestep=self.timesteps-200)
wo_grid[0] = f.LeftSideMur()
wo_grid[end_mur] = f.RightSideMur()
wo_grid.run_timesteps(self.timesteps, vis=False)
self.wo_phase.append(wo_grid.local_observers[0].phase)
def _grids_w_slab(self):
position_src = self.start_media - 1
position_obs = self.Nx - 3
end_mur = self.Nx - 1
for ind in self.indices:
# Step 1: init grid
w_grid = 'slab' + str(ind)
w_grid = f.Grid(nx=self.Nx, dx=self.dx)
# Step 2: init media
w_grid[self.start_media:ind] = f.NonDispersiveMedia(name='media', permeability=1, permittivity=self.eps, conductivity=0)
# Step 3: init source
w_grid[position_src] = f.ActivatedSinus(name='SinsquaredActivated', wavelength=self.lamb, carrier_wavelength=10000.e-09, phase_shift=0, amplitude=self.ampl, tfsf=True)
# Step 4: add observer
w_grid[position_obs] = f.QuasiHarmonicObserver(name='firstobserver', first_timestep=self.timesteps-200)
# Step 5: add boundaries
w_grid[0] = f.LeftSideMur()
w_grid[end_mur] = f.RightSideMur()
# Step 6: run simulation
w_grid.run_timesteps(timesteps=self.timesteps, vis=False)
# Step 7: misc
self.exp_amplitude.append(w_grid.local_observers[0].amplitude)
self.exp_phase.append(w_grid.local_observers[0].phase)
self.theo_amplitude.append(theory_dielectric_slab_complex(w_grid)[0])
self.theo_phasenunterschied.append(theory_dielectric_slab_complex(w_grid)[1])
def _visualize(self):
fig, axes = plt.subplots(2, 2)
fig.suptitle(r'$\epsilon_r=$ {epsilon}'.format(epsilon=self.eps) + r' $N_{\lambda_{media}}=$' + '{0:.3}'.format(self.lamb/(self.dx*np.sqrt(self.eps))), fontsize=20)
axes[0][0].plot(np.array(self.indices) - self.start_media, np.array(self.theo_amplitude), label='theorie', color='blue', marker='o', alpha=0.5)
axes[0][0].grid(True, linestyle=(0, (1, 5)), color='black', linewidth=1)
axes[0][0].plot(np.array(self.indices) - self.start_media, np.array(self.exp_amplitude), label='FDTD', linestyle='dashed', color='red', marker='s', alpha=0.5)
axes[0][0].legend(loc='best')
axes[0][0].set_xlabel('Breite des Mediums in ' + r'$\Delta_x$', fontsize=14)
axes[0][0].set_ylabel('Transmittierte Amplitude ' + r'$Ez_{tr}$', fontsize=14)
axes[0][0].set_xlim([0, self.length_media + 1])
axes[0][1].plot(np.array(self.indices) - self.start_media, np.array(self.theo_amplitude) / np.array(self.exp_amplitude), color='black')
axes[0][1].set_ylabel(r'$E_{tr,theo}$ / $E_{tr,FDTD}$', fontsize=14)
axes[0][1].set_xlabel('Breite des Mediums in ' + r'$\Delta_x$', fontsize=14)
axes[0][1].grid(True, linestyle=(0, (1, 5)), color='black', linewidth=1)
axes[0][1].set_xlim([0, self.length_media + 1])
axes[1][0].set_ylabel('Phasenunterschied', fontsize=14)
axes[1][0].plot(np.array(self.indices) - self.start_media, self.theo_phasenunterschied, label='theorie', color='blue', alpha=0.5)
axes[1][0].set_xlabel('Breite des Mediums in ' + r'$\Delta_x$', fontsize=14)
axes[1][0].grid(True, linestyle=(0, (1, 5)), color='black', linewidth=1)
axes[1][0].plot(np.array(self.indices) - self.start_media, -np.array(self.exp_phase) + self.wo_phase, color='red', linestyle='dashed',
label='FDTD', alpha=0.5)
axes[1][0].set_xlim([0, self.length_media + 1])
axes[1][0].legend()
axes[1][1].set_xlabel('Breite des Mediums in ' + r'$\Delta_x$', fontsize=14)
axes[1][1].set_ylabel(r'$d(\phi_{exp},\phi_{theo})$', fontsize=14)
axes[1][1].plot(np.array(self.indices) - self.start_media,
np.abs(-np.array(self.exp_phase) + self.wo_phase - np.array(self.theo_phasenunterschied)), color='black')
axes[1][1].grid(True, linestyle=(0, (1, 5)), color='black', linewidth=1)
axes[1][1].set_xlim([0, self.length_media + 1])
plt.show()
def run_benchmark(self):
self._grid_wo_slab()
self._grids_w_slab()
self._visualize()
| HaneWall/FDTD | fdtd_1d/benchmarks.py | benchmarks.py | py | 43,827 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "constants.BLUE",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "constants.CYAN",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "constants.TEAL",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "constants.ORANGE",
"... |
35375175946 | from django.urls import path
from . import views
from reviews import views as reviewsViews
from .views import SearchBooks
app_name = "books"
urlpatterns = [
path("home/", views.Home, name="home_page"),
path("list/", views.ListBooks, name="list_books"),
path("list/top_ten", views.TopTen, name="top_ten"),
path("<int:book_id>/", views.displayBookInfo, name="view_book"),
path("<int:book_id>/review/", reviewsViews.MakeReview, name="review_book"),
path("<int:book_id>/review/edit", reviewsViews.EditReview, name="edit_review"),
path("<int:book_id>/review/delete", reviewsViews.DeleteReview, name="delete_review"),
path("<int:book_id>/review/like_dislike", reviewsViews.LikeDislike, name="like_dislike"),
path('<int:book_id>/favorite', views.FavoriteBook, name='favorite_book'),
path('search/', views.SearchBooks, name='search_books'),
path('search_isbn/', views.SearchIsbn, name='search_isbn'),
path('isbn_create/', views.CreateFromIsbn, name='create_from_isbn')
]
| stanley-wei/CS35L-Project | books/urls.py | urls.py | py | 1,018 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "views.Home",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "views.ListBooks",
... |
11814951006 | # W tym pliku będą znajdować się ważne funkcje dotyczące
# Symulatora Julki. Zrobiłem to po to by nie zaśmiecać
# głównego pliku wykonywalnego.
import json
configFilePath = "./config.json"
loopedAnsw = False
randomizeAnsw = False
stoppingKeywords = None
userCustomJulkaAnsw = None
def initConfig():
global configFilePath
fetchConfigData(configFilePath)
def fetchConfigData(configFilePath):
global userCustomJulkaAnsw, loopedAnsw, randomizeAnsw, stoppingKeywords
try:
config = open(configFilePath, 'r', encoding="utf-8")
except:
print("Coś poszło nie tak. Ścieżka do pliku konfiguracyjnego jest nieprawidłowa lub takowy plik nie istnieje.")
return False
configData = json.load(config)
if configData["customRespStatus"] == True:
print("'customRespStatus' w pliku konfiguracyjnym jest równy: True!")
userCustomJulkaAnsw = configData["customResp"]
loopedAnsw = configData["looped"]
randomizeAnsw = configData["randomizeResp"]
stoppingKeywords = configData["stoppingKeywords"]
print(f"DEBUG INFO: userCustomJulkaAnsw == {userCustomJulkaAnsw}")
print(f"DEBUG INFO: loopedAnsw == {loopedAnsw}")
print(f"DEBUG INFO: randomizeAnsw == {randomizeAnsw}")
print(f"DEBUG INFO: stoppingKeywords == {stoppingKeywords}")
config.close()
# Kiedy wszystko pójdzie OK to funckja zwróci 'True'.
# Dzięki temu zapobiegnie to trywialnym błędom i
# będe mógł sprawdzić czy rzeczywiście wszystko poszło
# dobrze z planem.
return True
# Funkcja wypluwająca z siebie losowo
# wybierane odpowiedzi aniżeli chodząca
# po kolei po tablicy.
#
# Zmienna 'actualJulkaAnsw' jest po to
# żeby funkcja wiedziała jaką aktualnie
# używa tablice do odpowiedzi Julki.
def randomResp(actualJulkaAnsw, looped=False):
from random import randint
actualJulkaAnswRNG = actualJulkaAnsw
if looped == True:
while True:
actualRandNum = randint(0, (len(actualJulkaAnsw)-1))
userInput = input("TY: ")
if userInput in stoppingKeywords:
break
print("JULKA:", actualJulkaAnsw[actualRandNum])
else:
while True:
if len(actualJulkaAnswRNG) <= 0:
break
actualRandNum = randint(0, (len(actualJulkaAnswRNG)-1))
userInput = input("TY: ")
if userInput in stoppingKeywords:
break
print("JULKA:", actualJulkaAnswRNG[actualRandNum])
actualJulkaAnswRNG.pop(actualRandNum)
# Podobnie jak w przypadku funkcji 'randomResp',
# ale cóż... bez losowości.
def typicalResp(actualJulkaAnsw, looped=False):
if looped == True:
i = 0
while True:
if i < len(actualJulkaAnsw):
userInput = input("TY: ")
if userInput in stoppingKeywords:
break
print("JULKA:", actualJulkaAnsw[i])
i += 1
else:
i = 0
else:
for i in range(len(actualJulkaAnsw)):
userInput = input("TY: ")
if userInput in stoppingKeywords:
break
print("JULKA:", actualJulkaAnsw[i])
| REMOLP/JulkaSim-Remake | julkalib.py | julkalib.py | py | 3,301 | python | pl | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 83,
"usage_type": "call"
}
] |
23553893112 | from django.urls import path
from . import views
# URL routes for the blog app.
# NOTE(review): the '<slug:tag_slug>/' route can never match -- any
# single-segment path is captured by '<slug:category_name>/' first
# (Django uses the first matching pattern in order); presumably tags
# need a distinct prefix such as 'tag/<slug:tag_slug>/'.  Confirm
# against the templates that reverse 'detail_tag'.
urlpatterns = [
    path('<slug:category_name>/', views.CategoryView.as_view(), name="category"),
    path('<slug:category>/<slug:slug>/', views.PostDetailView.as_view(), name="detail_post"),
    path('<slug:tag_slug>/', views.TagDetailView.as_view(), name="detail_tag"),
    path("", views.HomeView.as_view()),
] | anuarmurawski84/TestDjango2 | blog/urls.py | urls.py | py | 365 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
31464620825 | import logging
import os
import uuid
from typing import Tuple, Dict, Optional, Callable, Any
from nebullvm.base import (
ModelParams,
DeepLearningFramework,
QuantizationType,
Device,
)
from nebullvm.config import (
AUTO_TVM_TUNING_OPTION,
AUTO_TVM_PARAMS,
QUANTIZATION_DATA_NUM,
CONSTRAINED_METRIC_DROP_THS,
)
from nebullvm.inference_learners.tvm import (
TVM_INFERENCE_LEARNERS,
ApacheTVMInferenceLearner,
)
from nebullvm.measure import compute_relative_difference
from nebullvm.optimizers.base import BaseOptimizer
from nebullvm.optimizers.quantization.tvm import TVMCalibrator
from nebullvm.optimizers.quantization.utils import (
check_quantization,
check_precision,
)
from nebullvm.optional_modules.onnx import onnx
from nebullvm.optional_modules.torch import torch, Module
from nebullvm.optional_modules.tvm import (
tvm,
IRModule,
NDArray,
XGBTuner,
autotvm,
relay,
ToMixedPrecision,
)
from nebullvm.transformations.base import MultiStageTransformation
from nebullvm.utils.data import DataManager
from nebullvm.utils.onnx import (
get_input_names,
)
from nebullvm.utils.torch import create_model_inputs_torch
logger = logging.getLogger("nebullvm_logger")
class ApacheTVMOptimizer(BaseOptimizer):
    """Optimizer that compiles AI models with the Apache TVM stack.

    (The previous docstring mentioned Nvidia GPUs / TensorRT, but this
    class targets Apache TVM: LLVM on CPU, CUDA when a GPU is selected.)
    """

    def optimize_from_torch(
        self,
        torch_model: Module,
        model_params: ModelParams,
        device: Device,
        input_tfms: MultiStageTransformation = None,
        metric_drop_ths: float = None,
        quantization_type: QuantizationType = None,
        metric: Callable = None,
        input_data: DataManager = None,
        model_outputs: Any = None,
    ) -> Optional[Tuple[ApacheTVMInferenceLearner, float]]:
        """Optimize a PyTorch model with Apache TVM.

        Mirrors :meth:`optimize` (the ONNX entry point) but builds the
        relay module directly from a traced torch model.  Returns the
        optimized learner and its metric drop, or ``None`` when the
        optimized model is rejected by the precision check.

        NOTE(review): unlike :meth:`optimize`, this method does not call
        ``check_quantization(quantization_type, metric_drop_ths)`` before
        quantizing -- confirm whether that validation was omitted on
        purpose.
        """
        logger.info(
            f"Optimizing with {self.__class__.__name__} and "
            f"q_type: {quantization_type}."
        )
        target = self._get_target(device)
        mod, params = self._build_tvm_model_from_torch(
            torch_model, model_params, device
        )
        # Apply the requested quantization before tuning, if any.
        if quantization_type is not None:
            if quantization_type is QuantizationType.HALF:
                mod = ToMixedPrecision(mixed_precision_type="float16")(mod)
            else:
                if quantization_type is QuantizationType.DYNAMIC:
                    inputs = None
                elif quantization_type is QuantizationType.STATIC:
                    inputs = input_data.get_split("train").get_numpy_list(
                        QUANTIZATION_DATA_NUM
                    )
                    input_names = [f"input_{n}" for n in range(len(inputs[0]))]
                    inputs = TVMCalibrator(inputs, input_names)
                else:
                    # Unknown quantization type: skip this compiler.
                    return
                mod = self._quantize(mod, params, input_data=inputs)
        # AutoTVM tuning, then compile with the best tuning history.
        tuning_records = self._tune_tvm_model(target, mod, params)
        with autotvm.apply_history_best(tuning_records):
            with tvm.transform.PassContext(opt_level=3, config={}):
                lib = relay.build(mod, target=target, params=params)
        # Remove temporary file created by tvm
        os.remove(tuning_records)
        learner = TVM_INFERENCE_LEARNERS[
            DeepLearningFramework.PYTORCH
        ].from_runtime_module(
            input_tfms=input_tfms,
            network_parameters=model_params,
            lib=lib,
            target_device=target,
            input_names=[
                f"input_{i}" for i in range(len(model_params.input_infos))
            ],
            input_data=list(input_data.get_list(1)[0])
            if input_data is not None
            else None,
        )
        # Validate the optimized model against the original outputs.
        test_input_data, ys = input_data.get_split("test").get_list(
            with_ys=True
        )
        is_valid, metric_drop = check_precision(
            learner,
            test_input_data,
            model_outputs,
            metric_drop_ths
            if quantization_type is not None
            else CONSTRAINED_METRIC_DROP_THS,
            metric_func=metric
            if quantization_type is not None
            else compute_relative_difference,
            ys=ys,
        )
        if not is_valid:
            if quantization_type is None:
                logger.warning(
                    "The model optimized with Pytorch tvm gives a "
                    "different result compared with the original model. "
                    "This compiler will be skipped."
                )
            return None
        return learner, metric_drop

    def optimize(
        self,
        model: str,
        output_library: DeepLearningFramework,
        model_params: ModelParams,
        device: Device,
        input_tfms: MultiStageTransformation = None,
        metric_drop_ths: float = None,
        quantization_type: QuantizationType = None,
        metric: Callable = None,
        input_data: DataManager = None,
        model_outputs: Any = None,
    ) -> Optional[Tuple[ApacheTVMInferenceLearner, float]]:
        """Optimize the input model with Apache TVM.

        Args:
            model (str): Path to the saved onnx model.
            output_library (str): DL Framework the optimized model will be
                compatible with.
            model_params (ModelParams): Model parameters.
            device: (Device): Device where the model will be run.
            input_tfms (MultiStageTransformation, optional): Transformations
                to be performed to the model's input tensors in order to
                get the prediction.
            metric_drop_ths (float, optional): Threshold for the accepted drop
                in terms of precision. Any optimized model with an higher drop
                will be ignored.
            quantization_type (QuantizationType, optional): The desired
                quantization algorithm to be used.
            metric (Callable, optional): If given it should
                compute the difference between the quantized and the normal
                prediction.
            input_data (DataManager, optional): User defined data.
            model_outputs (Any): Outputs computed by the original model.
        Returns:
            ApacheTVMInferenceLearner: Model optimized with TVM. The model
                will have an interface in the DL library specified in
                `output_library`.
        """
        logger.info(
            f"Optimizing with {self.__class__.__name__} and "
            f"q_type: {quantization_type}."
        )
        check_quantization(quantization_type, metric_drop_ths)
        target = self._get_target(device)
        mod, params = self._build_tvm_model_from_onnx(model, model_params)
        # Apply the requested quantization before tuning, if any.
        if quantization_type is not None:
            if quantization_type is QuantizationType.HALF:
                mod = ToMixedPrecision(mixed_precision_type="float16")(mod)
            else:
                if quantization_type is QuantizationType.DYNAMIC:
                    inputs = None
                elif quantization_type is QuantizationType.STATIC:
                    inputs = input_data.get_split("train").get_numpy_list(
                        QUANTIZATION_DATA_NUM
                    )
                    inputs = TVMCalibrator(inputs, get_input_names(model))
                else:
                    # Unknown quantization type: skip this compiler.
                    return
                mod = self._quantize(mod, params, input_data=inputs)
        # AutoTVM tuning, then compile with the best tuning history.
        tuning_records = self._tune_tvm_model(target, mod, params)
        with autotvm.apply_history_best(tuning_records):
            with tvm.transform.PassContext(opt_level=3, config={}):
                lib = relay.build(mod, target=target, params=params)
        # Remove temporary file created by tvm
        os.remove(tuning_records)
        learner = TVM_INFERENCE_LEARNERS[output_library].from_runtime_module(
            input_tfms=input_tfms,
            network_parameters=model_params,
            lib=lib,
            target_device=target,
            input_names=get_input_names(model),
            input_data=list(input_data.get_list(1)[0])
            if input_data is not None
            else None,
        )
        # Validate the optimized model against the original outputs.
        test_input_data, ys = input_data.get_split("test").get_list(
            with_ys=True
        )
        is_valid, metric_drop = check_precision(
            learner,
            test_input_data,
            model_outputs,
            metric_drop_ths
            if quantization_type is not None
            else CONSTRAINED_METRIC_DROP_THS,
            metric_func=metric
            if quantization_type is not None
            else compute_relative_difference,
            ys=ys,
        )
        if not is_valid:
            if quantization_type is None:
                logger.warning(
                    "The model optimized with ONNX tvm gives a "
                    "different result compared with the original model. "
                    "This compiler will be skipped."
                )
            return None
        return learner, metric_drop

    @staticmethod
    def _build_tvm_model_from_torch(
        torch_model: Module, model_params: ModelParams, device: Device
    ) -> Tuple[IRModule, Dict[str, NDArray]]:
        """Trace the torch model and convert it to a TVM relay module."""
        shape_dict = {
            f"input_{i}": (
                model_params.batch_size,
                *input_size,
            )
            for i, input_size in enumerate(model_params.input_sizes)
        }
        inputs = tuple(
            create_model_inputs_torch(
                model_params.batch_size, model_params.input_infos
            )
        )
        if device is not Device.GPU:
            inputs = tuple(input_.cpu() for input_ in inputs)
            torch_model.cpu()
        # Warm-up call, then trace for relay conversion.
        with torch.no_grad():
            _ = torch_model(*inputs)
        model_trace = torch.jit.trace(torch_model, inputs)
        model_trace.eval()
        mod, params = relay.frontend.from_pytorch(
            model_trace, list(shape_dict.items())
        )
        return mod, params

    @staticmethod
    def _build_tvm_model_from_onnx(
        onnx_model_path: str, model_params: ModelParams
    ) -> Tuple[IRModule, Dict[str, NDArray]]:
        """Load the ONNX model and convert it to a TVM relay module."""
        shape_dict = {
            input_key: (
                model_params.batch_size,
                *input_size,
            )
            for input_key, input_size in zip(
                get_input_names(onnx_model_path), model_params.input_sizes
            )
        }
        onnx_model = onnx.load(onnx_model_path)
        mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
        return mod, params

    @staticmethod
    def _quantize(
        mod: IRModule,
        params: Dict[str, NDArray],
        input_data: TVMCalibrator = None,
    ) -> IRModule:
        """Quantize the relay module (KL calibration when data is given)."""
        if input_data is not None:
            with relay.quantize.qconfig(
                calibrate_mode="kl_divergence", weight_scale="max"
            ):
                mod = relay.quantize.quantize(mod, params, dataset=input_data)
        else:
            with relay.quantize.qconfig(
                calibrate_mode="global_scale", global_scale=8.0
            ):
                mod = relay.quantize.quantize(mod, params)
        return mod

    @staticmethod
    def _get_target(device) -> str:
        """Return the TVM target string for the requested device."""
        if device is Device.GPU:
            return str(tvm.target.cuda())
        else:
            return "llvm"  # run on CPU

    @staticmethod
    def _tune_tvm_model(
        target: str, mod: IRModule, params: Dict[str, NDArray]
    ) -> str:
        """Tune the model using AutoTVM.

        Returns the path of a temporary JSON file with the tuning records;
        the caller is responsible for deleting it.
        """
        # TODO: add support to Ansor
        tuning_records = f"{uuid.uuid4()}_model_records.json"
        # create a TVM runner
        runner = autotvm.LocalRunner(
            number=AUTO_TVM_PARAMS["number"],
            repeat=AUTO_TVM_PARAMS["repeat"],
            timeout=AUTO_TVM_PARAMS["timeout"],
            min_repeat_ms=AUTO_TVM_PARAMS["min_repeat_ms"],
            # TODO modify min_repeat_ms for GPU usage
            enable_cpu_cache_flush=True,
        )
        # begin by extracting the tasks from the onnx model
        tasks = autotvm.task.extract_from_program(
            mod["main"], target=target, params=params
        )
        # Tune the extracted tasks sequentially.
        for i, task in enumerate(tasks):
            tuner_obj = XGBTuner(task, loss_type="rank")
            tuner_obj.tune(
                n_trial=min(
                    AUTO_TVM_TUNING_OPTION["trials"], len(task.config_space)
                ),
                early_stopping=AUTO_TVM_TUNING_OPTION["early_stopping"],
                measure_option=autotvm.measure_option(
                    builder=autotvm.LocalBuilder(build_func="default"),
                    runner=runner,
                ),
                callbacks=[
                    autotvm.callback.log_to_file(tuning_records),
                ],
            )
        return tuning_records
| MuntahaShams/nebullvm | nebullvm/optimizers/tvm.py | tvm.py | py | 12,889 | python | en | code | null | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "nebullvm.optimizers.base.BaseOptimizer",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "nebullvm.optional_modules.torch.Module",
"line_number": 55,
"usage_type": "name"... |
25056266008 | import re
import gspread
import os
import json
from itertools import takewhile
consonants_bw = "['|>&<}bptvjHxd*rzs$SDTZEgfqklmnhwy]"
double_cons = re.compile('{}{}'.format(consonants_bw, consonants_bw))
CONS_CLUSTER_IMPLEM = False
POS_NOMINAL = [
'abbrev', 'adj', 'adj_comp', 'adj_num', 'adv', 'adv_interrog',
'adv_rel', 'foriegn', 'interj', 'noun', 'noun_num', 'noun_prop',
'noun_quant', 'pron', 'pron_dem', 'pron_exclam', 'pron_interrog',
'pron_rel', 'verb_nom', 'verb_pseudo']
def patternize_root(root, dc=None, surface_form=False):
    """Will patternize denuded roots (except patterns which are inherently
    geminate which it treats as a root), while keeping defective letters and
    gemination apparent.

    Args:
        root: Buckwalter-transliterated string to patternize.
        dc: consonant-cluster marker; when truthy, a '#' in `root` stands
            for a two-consonant slot (see CONS_CLUSTER_IMPLEM).
        surface_form: if True, keep hamza letters as written instead of
            normalizing them all to '>'.

    Returns:
        (pattern, soundness): `pattern` is a list of pattern tokens (digit
        strings for radicals, literal characters otherwise); `soundness` is
        a label such as 'sound', 'def1', 'gem', 'mhmz1+def1', ...
    """
    pattern = []
    soundness = []
    c = 0  # running index of the current radical (consonant slot)
    for char in root:
        # Hamza forms: counted as a radical; normalized to '>' unless the
        # surface form is requested.
        if char in [">", "&", "<", "}", "'"]:
            pattern.append(">" if not surface_form else char)
            c += 1
            soundness.append('mhmz')
        # Glides: kept literally, flagged as defective.
        elif char in ["w", "Y", "y"]:
            pattern.append(char)
            c += 1
            soundness.append('def')
        # Short vowels / sukun: copied through, not counted as radicals.
        elif char in ["a", "i", "u", "o"]:
            pattern.append(char)
        # Gemination marker.
        elif char == "~":
            pattern.append(char)
            soundness.append('gem')
        # Alif: rendered differently depending on its radical position.
        elif char == "A":
            if c == 2:
                pattern.append("aA")
            else:
                pattern.append("aAo")
            c += 1
            soundness.append('def')
        # Any other character is a sound radical -> its 1-based index.
        else:
            c += 1
            pattern.append(str(c))
            # '#' stands for a consonant cluster: emit the index twice.
            if dc and char == "#":
                pattern.append(str(c))
    # Aggregate the soundness flags into a compact '+'-joined label.
    soundness_ = f"mhmz{soundness.count('mhmz')}+" if soundness.count(
        'mhmz') else ''
    soundness_ += f"def{soundness.count('def')}+" if soundness.count(
        'def') else ''
    soundness_ += f"gem+" if soundness.count('gem') else ''
    soundness = "sound" if soundness_ == '' else soundness_[:-1]
    return pattern, soundness
def correct_soundness(soundness):
    """Drop the gemination component from a soundness label.

    Used for abstract patterns which are inherently geminate (e.g.,
    1a2~a3); an empty result after stripping means the root is 'sound'.
    """
    stripped = re.sub(r'\+?gem', '', soundness)
    if stripped:
        return stripped
    return 'sound'
def analyze_pattern(lemma, root=None, surface_form=False):
    """Infer the morphological pattern of a Buckwalter-transliterated lemma.

    Dispatches on the number of consonantal letters (3-6) and on the
    augmentation prefixes/infixes of the lemma to recover an abstract
    pattern (e.g. '1a2a3', '{ino1a2a3') and a concrete pattern produced by
    `patternize_root`.

    Args:
        lemma: lemma in Buckwalter transliteration.
        root: optional root used to disambiguate some augmented forms.
        surface_form: passed through to `patternize_root`; also triggers
            surface normalization of the final pattern.

    Returns:
        dict with keys 'pattern', 'pattern_abstract', 'soundness', 'error'
        ('error' is a short code such as '4', '5+t', '<3' when the lemma
        could not be analyzed, in which case the other values stay None).
    """
    lemma_raw = lemma
    # Normalize madda to hamza + alif for uniform processing.
    lemma = re.sub(r'\|', '>A', lemma)
    dc = None
    if CONS_CLUSTER_IMPLEM:
        # Replace a double-consonant cluster with the placeholder '#'.
        contains_double_cons = double_cons.search(lemma)
        if contains_double_cons:
            if len(contains_double_cons.regs) > 1:
                raise NotImplementedError
            start, end = contains_double_cons.regs[0][0], contains_double_cons.regs[0][1]
            dc = contains_double_cons[0]
            lemma = lemma[:start] + '#' + lemma[end:]
    # Count letters with short vowels stripped out.
    lemma_undiac = re.sub(r'[auio]', '', lemma)
    num_letters_lemma = len(lemma_undiac)
    exception = is_exception(lemma)
    if exception:
        return exception
    result = {'pattern': None,
              'pattern_abstract': None,
              'soundness': None,
              'error': None}
    # Triliteral (denuded)
    # 1a2a3
    if num_letters_lemma == 3:
        pattern, soundness = patternize_root(lemma, dc, surface_form)
        abstract_pattern = "1a2a3"
    # Triliteral (augmented) and quadriliteral (denuded and augmented)
    elif num_letters_lemma > 3:
        if num_letters_lemma == 4:
            # 1a2~3 (tri)
            if lemma[3] == "~" and lemma[1] != "A":
                pattern, soundness = patternize_root(lemma, dc, surface_form)
                soundness = correct_soundness(soundness)
                abstract_pattern = "1a2~a3"
            # 1A2a3 (tri)
            elif lemma[1] == "A":
                lemma_ = lemma[:1] + lemma[2:]
                pattern, soundness = patternize_root(lemma_, dc, surface_form)
                pattern.insert(1, "A")
                abstract_pattern = "1A2a3"
            # >a1o2a3 (tri) [has precedence over the next clause]
            elif lemma[0] == ">" and dc is None and (len(root) == 3 if root else True):
                lemma_ = lemma[2:]
                pattern, soundness = patternize_root(lemma_, dc, surface_form)
                pattern.insert(0, ">a")
                abstract_pattern = ">a1o2a3"
            # 1a2o3a4 (quad)
            elif lemma[3] == "o":
                pattern, soundness = patternize_root(lemma, dc, surface_form)
                abstract_pattern = "1a2o3a4"
            else:
                result['error'] = '4'
                return result
        elif num_letters_lemma == 5:
            if lemma[0] == "t":
                # ta1A2a3 (tri)
                if lemma[3] == "A":
                    lemma_ = lemma[2] + lemma[4:]
                    pattern, soundness = patternize_root(lemma_, dc, surface_form)
                    pattern.insert(0, "ta")
                    pattern.insert(2, "A")
                    abstract_pattern = "ta1A2a3"
                # ta1a2~3 (tri) or ta1a2o3a4 (quad)
                elif lemma[5] == "~" or lemma[5] == "o":
                    lemma_ = lemma[2:]
                    pattern, soundness = patternize_root(lemma_, dc, surface_form)
                    soundness = correct_soundness(soundness)
                    pattern.insert(0, "ta")
                    abstract_pattern = "ta1a2~3" if lemma[5] == "~" else "ta1a2o3a4"
                else:
                    result['error'] = '5+t'
                    return result
            # {ino1a2a3 (tri)
            elif lemma.startswith("{ino") and (lemma[4] == root[0] if root else True) or \
                    lemma.startswith("{im~"):
                if lemma.startswith("{im~"):
                    lemma_ = lemma[2] + "o" + lemma[5:]
                else:
                    lemma_ = lemma[4:]
                pattern, soundness = patternize_root(lemma_, dc, surface_form)
                if lemma.startswith("{im~"):
                    pattern.insert(0, "{i")
                    pattern[2] = '~a'
                else:
                    pattern.insert(0, "{ino")
                abstract_pattern = "{ino1a2a3"
            # {i1o2a3~ (tri) [has precedence over the next clause]
            elif lemma[0] == "{" and lemma[-1] == "~" and (lemma[4] == root[1] if root else True):
                lemma_ = lemma[2:]
                pattern, soundness = patternize_root(lemma_, dc, surface_form)
                soundness = correct_soundness(soundness)
                pattern.insert(0, "{i")
                abstract_pattern = "{i1o2a3~"
            # {i1ota2a3 (tri)
            elif lemma[0] == "{" and (lemma[4] in ["t", "T"] and lemma[4] not in ["m"] or
                                      lemma[3] == "~" or lemma[2] == 'z'):
                abstract_pattern = "{i1ota2a3"
                # Strip the infixed 't' (or assimilated variant) before
                # patternizing; the exact slice depends on lemma length.
                if len(lemma) == 7:
                    lemma_ = lemma[2:4] + lemma[5:]
                elif lemma[3] == "~":
                    if len(lemma) == 6:
                        lemma_ = lemma[2] + "o" + lemma[4:]
                    else:
                        lemma_ = lemma[2] + "o" + lemma[5:]
                else:
                    lemma_ = lemma[2:4] + lemma[6:]
                pattern, soundness = patternize_root(lemma_, dc, surface_form)
                pattern.insert(0, "{i")
                if lemma[3] == "~":
                    pattern[2] = "~"
                    if len(lemma) != 6:
                        pattern[2] = "~a"
                    if root and root[0] != 't':
                        pattern[1] = 't'
                elif len(lemma) in [6, 7]:
                    pattern.insert(3, "t")
                else:
                    pattern.insert(3, "ta")
            else:
                result['error'] = '5'
                return result
        elif num_letters_lemma == 6:
            # {isota1o2a3 (tri)
            if lemma.startswith("{iso") and lemma[4] == 't':
                lemma_ = lemma[6:]
                pattern, soundness = patternize_root(lemma_, dc, surface_form)
                pattern.insert(0, "{isota")
                abstract_pattern = "{isota1o2a3"
            # {i1oEawo2a3 (tri)
            elif lemma.startswith("{i") and lemma[6:8] == "wo":
                lemma_ = lemma[2:4] + lemma[8:]
                pattern, soundness = patternize_root(lemma_, dc, surface_form)
                pattern.insert(0, "{i")
                pattern.insert(3, "2awo")
                abstract_pattern = "{i1o2awo2a3"
            # {i1o2a3a4~ (quad)
            elif lemma[-1] == "~":
                lemma_ = lemma[2:]
                pattern, soundness = patternize_root(lemma_, dc, surface_form)
                soundness = correct_soundness(soundness)
                if soundness == "def1":
                    pattern[3] = "aAo"
                pattern.insert(0, "{i")
                abstract_pattern = "{i1o2a3a4~"
            # {i1o2ano3a4 (quad)
            elif lemma[6:8] == "no":
                lemma_ = lemma[2:6] + lemma[8:]
                pattern, soundness = patternize_root(lemma_, dc, surface_form)
                pattern.insert(0, "{i")
                pattern.insert(5, "no")
                abstract_pattern = "{i1o2ano3a4"
            else:
                result['error'] = '6'
                return result
        else:
            result['error'] = '>4'
            return result
    # If there are less than 3 letters (maybe there is a problem)
    else:
        result['error'] = '<3'
        return result
    pattern = ''.join(pattern)
    if surface_form:
        # Restore surface orthography: madda and plain alif variants.
        pattern = re.sub(r'>aAo|>A', '|', pattern)
        pattern = re.sub(r'a?Ao?', 'A', pattern)
    # Post-corrections for the {i1ota2a3 family (assimilated 't' infix).
    if abstract_pattern == "{i1ota2a3" and '~' not in lemma_raw or \
            abstract_pattern == "{i1ota2a3" and lemma[3] != "~" and len(lemma) not in [6, 7]:
        pattern = pattern[:4] + lemma_raw[4] + pattern[5:]
    elif abstract_pattern == "{i1ota2a3"and lemma[0] == "{" and (lemma[4] in ["t", "T"] or
                                                                lemma[3] == "~" or lemma[2] == 'z'):
        if root and root[0] in ['d', 'D', 'v', 'T', 'Z']:
            pattern = pattern[:2] + '1' + pattern[3:]
    result['pattern'] = pattern
    result['pattern_abstract'] = abstract_pattern
    result['soundness'] = soundness
    return result
def is_exception(lemma):
    """Return the hard-coded analysis of a known irregular lemma, else None."""
    if lemma == ">anojolaz":
        return {'pattern': '>a2o3o4a5', 'pattern_abstract': '1a2o3o4a5',
                'soundness': 'sound', 'error': None}
    if lemma == "ta>anojolaz":
        return {'pattern': 'ta>a2o3o4a5', 'pattern_abstract': 'ta1a2o3o4a5',
                'soundness': 'sound', 'error': None}
    return None
def assign_pattern(lemma, root=None):
    """Compute concrete, surface and abstract patterns for *lemma*."""
    concrete = analyze_pattern(lemma, root)
    surface = analyze_pattern(lemma, root, surface_form=True)
    return {
        'pattern_conc': concrete['pattern'],
        'pattern_surf': surface['pattern'],
        'pattern_abstract': concrete['pattern_abstract'],
        'soundness': concrete['soundness'],
        'error': concrete['error'],
    }
def analyze_pattern_egy(root, stem):
    """Derive the abstract pattern of an EGY *stem* by replacing each root
    radical with its 1-based index (e.g. root 'ktb' + stem 'katab' ->
    '1a2a3').

    Glides (w/y) and the placeholder radical 'C' get special handling
    because they may surface as vowels; a few irregular stems are
    hard-coded at the end.  Two duplicated `elif` branches whose condition
    was identical to the first final-weak branch were unreachable and have
    been removed (no behavior change).
    """
    i = 0
    tmp_stem = stem
    first_part = ''
    second_part = stem
    for char in root:
        i = i + 1
        if char not in 'yw><&C{}C':
            # Plain consonant: escape regex metacharacters, then replace
            # its first occurrence with the radical index.
            if char == '$':
                char = '\\$'
            if char == '*':
                char = '\\*'
            second_part = re.sub(char, str(i), second_part, 1)
        # w/y as a true consonant at the start of the stem.
        elif char == 'w' and second_part.startswith('w') and i == 1:
            second_part = re.sub(char, str(i), second_part, 1)
        elif char == 'y' and i == 1:
            second_part = re.sub(char, str(i), second_part, 1)
        # Final weak radical surfacing as a long vowel: normalize to 'aY'.
        elif (char == 'y' or char == 'w') and i == len(root) and \
                (second_part.endswith('A') or second_part.endswith('a')):
            second_part = second_part[:-1] + 'aY'
            stem = stem[:-1] + 'aY'
        elif second_part.endswith('i'):
            second_part = second_part[:-1] + 'iy'
            stem = stem[:-1] + 'iy'
        elif char == 'y':  # y in the root acting as a consonant
            # NOTE(review): the third argument of re.search is *flags*, not
            # a count -- this `1` looks copy/pasted from re.sub; confirm
            # the intended behavior before changing it.
            if not re.search('iy(?!a|i|u|\\~)', second_part, 1):
                second_part = re.sub(r'y', str(i), second_part, 1)
            else:
                second_part = re.sub(
                    r'([aui\~])y([aiu\~])', r'\g<1>' + str(i) + r'\g<2>',
                    second_part)
        elif char == 'w':  # w in the root acting as a consonant
            if not re.search('uw(?!a|i|u|\\~)', second_part):
                second_part = re.sub(r'w', str(i), second_part, 1)
        elif char == 'C' and i == len(root):
            # Final placeholder radical: normalize the trailing vowel.
            if second_part.endswith('a'):
                second_part = second_part[:-1] + 'A'
                stem = stem[:-1] + 'A'
            elif second_part.endswith('i'):
                second_part = second_part[:-1] + 'iy'
                stem = stem[:-1] + 'iy'
        # Move everything up to (and including) the newly inserted index
        # into the fixed prefix so later radicals only match the remainder.
        if str(i) in second_part:
            first_part = first_part + second_part.split(str(i))[0] + str(i)
            second_part = second_part.split(str(i))[1]
        tmp_stem = first_part + second_part
    # Hard-coded irregular stems.
    if stem == 'AiftataH':
        tmp_stem = 'Ai1ta2a3'
    elif stem == 'yiftitiH':
        tmp_stem = 'yi1ti2i3'
    elif stem == 'Aistashal' or stem == 'Aistaslam':
        tmp_stem = 'Aista12a3'
    elif stem == 'yistashal' or stem == 'yistaslam':
        tmp_stem = 'yista12i3'
    return tmp_stem
def add_check_mark_online(rows,
                          spreadsheet,
                          worksheet,
                          error_cases=None,
                          indexes=None,
                          messages=None,
                          mode=None,
                          write='append',
                          status_col_name='STATUS',
                          service_account='/Users/chriscay/.config/gspread/service_account.json'):
    """Write per-row status flags to a Google Sheets STATUS column.

    Exactly one of `error_cases` (lemma values to flag), `indexes`
    (row indexes to flag) or `messages` (one message per row) must be
    given.  `rows` is expected to be a pandas-like frame with a 'LEMMA'
    column aligned with the worksheet rows.

    NOTE(review): `bool(a) ^ bool(b) ^ bool(c)` is also true when all
    three are truthy -- presumably "exactly one" was intended; confirm.
    """
    assert bool(error_cases) ^ bool(indexes) ^ bool(messages)
    if error_cases is not None:
        # Translate lemma-based error cases into row indexes.
        filtered = rows[rows['LEMMA'].isin(error_cases)]
        indexes = list(filtered.index)
    # Accept spreadsheet/worksheet either as objects or by name.
    if type(spreadsheet) is str:
        sa = gspread.service_account(service_account)
        spreadsheet = sa.open(spreadsheet)
    if type(worksheet) is str:
        worksheet = spreadsheet.worksheet(title=worksheet)
    header = worksheet.row_values(1)
    header_count = header.count(status_col_name)
    if header_count == 0:
        # No status column yet: create it as the first column.
        worksheet.insert_cols([[status_col_name]])
        header = worksheet.row_values(1)
    elif header_count > 1:
        raise NotImplementedError
    status_column_index = header.index(status_col_name)
    # Convert the 0-based column index to A1-notation (supports up to 'ZZ').
    column_letter = (chr(ord('A') - 1 + status_column_index // 26)
                     if status_column_index >= 26 else '')
    column_letter += chr(ord('A') + status_column_index % 26)
    # Existing statuses, padded to the same length as the lemma column.
    status_old = worksheet.col_values(status_column_index + 1)[1:]
    lemmas = worksheet.col_values(header.index('LEMMA') + 1)[1:]
    status_old += [''] * (len(lemmas) - len(status_old))
    assert len(lemmas) == len(status_old) == len(rows['LEMMA'])
    col_range = f'{column_letter}2:{len(rows.index) + 1}'
    if indexes:
        # Flag listed rows with CHECK, everything else with OK (optionally
        # namespaced by `mode`).
        if mode:
            check, ok = f'{mode}:CHECK', f'{mode}:OK'
        else:
            check, ok = 'CHECK', 'OK'
        assert set(status_old) <= {check, ok, ''}
        status_new = [[check] if i in indexes else ([ok] if status_old[i] != check else [check])
                      for i in range(len(rows['LEMMA']))]
    elif messages:
        # Write (or append) one free-form message per row.
        assert len(status_old) == len(lemmas) == len(messages)
        if mode:
            mode = f'{mode}:'
        else:
            mode = ''
        if write == 'overwrite':
            status_new = [[f'{mode}{message}'] if message else ['']
                          for message in messages]
        elif write == 'append':
            status_new = [[f"{s}{' ' if s else ''}" + f'{mode}{message}'] if message else [s + '']
                          for s, message in zip(status_old, messages)]
        else:
            raise NotImplementedError
    worksheet.update(col_range, status_new)
def _strip_brackets(info):
if info[0] == '[' and info[-1] == ']':
info = info[1:-1]
return info
def get_config_file(config_file):
    """Load *config_file* (JSON) from the package-level ``configs`` dir."""
    package_root = '/'.join(os.path.dirname(__file__).split('/')[:-1])
    config_path = os.path.join(package_root, 'configs', config_file)
    with open(config_path) as fp:
        return json.load(fp)
def get_db_dir_path(config, config_name):
    """Return the databases directory for the dialect of *config_name*."""
    local_config = config['local'][config_name]
    return os.path.join('databases', f"camel-morph-{local_config['dialect']}")
def get_db_path(config, config_name):
    """Return the full path of the compiled DB file for *config_name*."""
    local_config = config['local'][config_name]
    return os.path.join(
        'databases',
        f"camel-morph-{local_config['dialect']}",
        local_config['db'],
    )
def get_data_dir_path(config, config_name):
    """Return the data directory of *config_name* under its dialect."""
    local_config = config['local'][config_name]
    return os.path.join(
        'data', f"camel-morph-{local_config['dialect']}", config_name
    )
def get_lex_paths(config, config_name):
    """Return the CSV path of every lexicon sheet configured for *config_name*."""
    sheets = config['local'][config_name]['lexicon']['sheets']
    lex_dir = get_data_dir_path(config, config_name)
    return [os.path.join(lex_dir, f'{sheet_name}.csv') for sheet_name in sheets]
def lcp(strings):
    "Longest common prefix"
    # os.path.commonprefix is a plain character-wise longest common
    # prefix, which is exactly what is wanted here.
    return os.path.commonprefix(list(strings))
{
"api_name": "re.compile",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 82,
"usage_ty... |
1823465172 | import json
from collections import defaultdict
from time import sleep
from uuid import uuid4
import pytest
import requests
from dp.protos.enodebd_dp_pb2 import CBSDRequest, CBSDStateResult, LteChannel
from dp.protos.enodebd_dp_pb2_grpc import DPServiceStub
from magma.test_runner.config import TestConfig
from magma.test_runner.tests.integration_testcase import (
DomainProxyIntegrationTestCase,
)
from retrying import retry
config = TestConfig()
FCC_ID = "some_fcc_id"
USER_ID = "some_user_id"
@pytest.mark.local
class ActiveModeControllerTestCase(DomainProxyIntegrationTestCase):
    """End-to-end tests for CBSD provisioning through the Domain Proxy.

    Each test uses a unique CBSD serial number so parallel or repeated
    runs do not interfere with one another.
    """

    def setUp(self):
        # Unique per-test serial number keyed on the test method name.
        self.serial_number = self._testMethodName + '_' + str(uuid4())

    def test_provision_cbsd_in_sas_requested_by_dp_client(self):
        self.given_cbsd_provisioned()

    def test_logs_are_written_to_elasticsearch(self):
        self.given_cbsd_provisioned()
        self.then_elasticsearch_contains_logs()

    def test_grant_relinquished_after_inactivity(self):
        self.given_cbsd_provisioned()
        self.when_cbsd_is_inactive()
        self.then_cbsd_has_no_grants_in_sas(self.dp_client)

    def test_last_used_max_eirp_stays_the_same_after_inactivity(self):
        self.given_cbsd_provisioned()
        self.when_cbsd_is_inactive()
        self.then_cbsd_has_no_grants_in_sas(self.dp_client)
        # Re-provisioning succeeds with the same expected channel/EIRP.
        self.given_cbsd_provisioned()

    def given_cbsd_provisioned(self):
        """Register the CBSD and wait until SAS grants it a channel."""
        self.given_cbsd_with_transmission_parameters()
        self.dp_client.GetCBSDState(self._build_cbsd_request())
        self.then_cbsd_is_eventually_provisioned_in_sas(self.dp_client)

    @retry(stop_max_attempt_number=60, wait_fixed=1000)
    def then_elasticsearch_contains_logs(self):
        """Assert the expected request/response log entries reached ES."""
        query = {
            "query": {
                "term": {
                    "cbsd_serial_number.keyword": {
                        "value": self.serial_number,
                    },
                },
            },
        }
        actual = requests.post(
            f"{config.ELASTICSEARCH_URL}/dp*/_search?size=100",
            data=json.dumps(query),
            headers={
                "Content-type": "application/json",
            },
        ).json()
        log_field_names = [
            "log_from",
            "log_to",
            "log_name",
            "log_message",
            "cbsd_serial_number",
            "network_id",
            "fcc_id",
        ]
        actual_log_types = defaultdict(int)
        logs = actual["hits"]["hits"]
        for log in logs:
            actual_log_types[log["_source"]["log_name"]] += 1
            for fn in log_field_names:
                self.assertIn(fn, log["_source"].keys())
        self.assertEqual(1, actual_log_types["CBSDRegisterRequest"])
        self.assertEqual(1, actual_log_types["CBSDRegisterResponse"])
        self.assertEqual(1, actual_log_types["registrationRequest"])
        self.assertEqual(1, actual_log_types["registrationResponse"])
        self.assertEqual(1, actual_log_types["spectrumInquiryRequest"])
        self.assertEqual(1, actual_log_types["spectrumInquiryResponse"])
        self.assertEqual(1, actual_log_types["grantRequest"])
        self.assertEqual(1, actual_log_types["grantResponse"])
        # The number of GetCBSDStateRequest and heartbeatResponse may differ between tests, so only asserting they have been logged
        self.assertGreater(actual_log_types["heartbeatRequest"], 0)
        self.assertGreater(actual_log_types["heartbeatResponse"], 0)
        self.assertGreater(actual_log_types["GetCBSDStateRequest"], 0)
        self.assertGreater(actual_log_types["GetCBSDStateResponse"], 0)

    # TODO change this when some API for domain proxy is introduced
    def given_cbsd_with_transmission_parameters(self):
        """Register the CBSD; initially it has no enabled radio state."""
        state = self.dp_client.CBSDRegister(
            self._build_cbsd_request(), wait_for_ready=True,
        )
        self.assertEqual(self._build_empty_state_result(), state)

    @staticmethod
    def when_cbsd_is_inactive():
        """Sleep long enough for the DP to consider the CBSD inactive."""
        inactivity = 3
        polling = 1
        delta = 3  # TODO investigate if such high delta is needed
        total_wait_time = inactivity + 2 * polling + delta
        sleep(total_wait_time)

    @retry(stop_max_attempt_number=30, wait_fixed=1000)
    def then_cbsd_is_eventually_provisioned_in_sas(self, dp_client: DPServiceStub):
        state = dp_client.GetCBSDState(self._build_cbsd_request())
        self.assertEqual(self._build_get_state_result(), state)

    def then_cbsd_has_no_grants_in_sas(self, dp_client: DPServiceStub):
        state = dp_client.GetCBSDState(self._build_cbsd_request())
        self.assertEqual(self._build_empty_state_result(), state)

    def _build_cbsd_request(self) -> CBSDRequest:
        """Build the request identifying this test's CBSD."""
        return CBSDRequest(
            user_id=USER_ID,
            fcc_id=FCC_ID,
            serial_number=self.serial_number,
            min_power=0,
            max_power=20,
            antenna_gain=15,
            number_of_ports=2,
        )

    @staticmethod
    def _build_get_state_result() -> CBSDStateResult:
        """Expected state once SAS has granted the 3620-3630 MHz channel."""
        return CBSDStateResult(
            radio_enabled=True,
            carrier_aggregation_enabled=False,
            channel=LteChannel(
                low_frequency_hz=3620_000_000,
                high_frequency_hz=3630_000_000,
                max_eirp_dbm_mhz=28.0,
            ),
            channels=[
                LteChannel(
                    low_frequency_hz=3620_000_000,
                    high_frequency_hz=3630_000_000,
                    max_eirp_dbm_mhz=28.0,
                ),
            ],
        )

    @staticmethod
    def _build_empty_state_result() -> CBSDStateResult:
        """Expected state when the CBSD has no active grants."""
        return CBSDStateResult(radio_enabled=False)
| AdityaKoranga/magma | dp/cloud/python/magma/test_runner/tests/test_active_mode_controller.py | test_active_mode_controller.py | py | 5,696 | python | en | code | null | github-code | 1 | [
{
"api_name": "magma.test_runner.config.TestConfig",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "magma.test_runner.tests.integration_testcase.DomainProxyIntegrationTestCase",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 2... |
4421851764 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module to check if the DB works
===============================
Runs multiple checks to see if the database was correctly setup.
"""
import argparse
import logging
from orion.core.cli.db.test import main
log = logging.getLogger(__name__)
DESCRIPTION = "(DEPRECATED) Use command `orion db test` instead"
def add_subparser(parser):
    """Add the subparser that needs to be used for this command"""
    subparser = parser.add_parser(
        "test-db", help=DESCRIPTION, description=DESCRIPTION
    )
    subparser.add_argument(
        "-c",
        "--config",
        type=argparse.FileType("r"),
        metavar="path-to-config",
        help="user provided orion configuration file",
    )
    subparser.set_defaults(func=wrap_main)
    return subparser
def wrap_main(args):
    """Run through all checks for database."""
    deprecation_msg = (
        "Command `orion test-db` is deprecated and will be removed in v0.2.0. Use "
        "`orion db test` instead."
    )
    log.warning(deprecation_msg)
    main(args)
| lebrice/orion | src/orion/core/cli/test_db.py | test_db.py | py | 1,067 | python | en | code | null | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "orion.core.cli.db.test.main",
"line_number": 45,
"usage_type": "call"
}
] |
38895831805 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # 要放在 import tensorflow as tf 前面才会起作用 !!!
import tensorflow as tf
from tensorflow import keras
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from data import DIV2K
class Reloader:
    """Reloads a trained super-resolution model and visualizes its results."""

    def __init__(self, model_path):
        """Load a saved Keras model (uncompiled) plus the DIV2K validation
        split used for qualitative checks."""
        self.model = keras.models.load_model(model_path, compile=False)
        self.dataset_valid = DIV2K(subset='valid').dataset()

    @staticmethod
    def lossShow(train_loss_arr_path, valid_loss_arr_path, save_path):
        """Plot train/valid loss curves stored as .npy arrays and save the
        figure to *save_path*.

        Declared as a static method: the original definition omitted
        ``self`` and therefore only worked when called on the class itself
        (as done in ``__main__``); ``@staticmethod`` keeps that call valid
        and also makes instance-level calls work.
        """
        train_loss_arr = np.load(train_loss_arr_path)
        valid_loss_arr = np.load(valid_loss_arr_path)
        epochs = len(train_loss_arr)
        epochs_arr = np.arange(epochs) + 1
        plt.figure()
        plt.plot(epochs_arr, train_loss_arr, 'b', label='train_loss')
        plt.plot(epochs_arr, valid_loss_arr, 'y', label='valid_loss')
        plt.legend()
        plt.ylabel('loss')
        plt.xlabel('epoches')  # (sic) label kept for output compatibility
        plt.title('loss_curve')
        plt.savefig(save_path)

    def reload(self):
        """Run the model on one validation batch and dump input/label/output
        images as input.png / label.png / output.png."""
        data_iter = iter(self.dataset_valid)
        test_item = data_iter.next()
        output = self.model(test_item[0])
        input_item = test_item[0][0].numpy()
        label_item = test_item[1][0].numpy()
        output_item = output[0].numpy()
        input_img = Image.fromarray(np.uint8(input_item))
        label_img = Image.fromarray(np.uint8(label_item))
        output_img = Image.fromarray(np.uint8(output_item))
        input_img.save('input.png')
        label_img.save('label.png')
        output_img.save('output.png')
if __name__ == '__main__':
    # Smoke-test the trained FSRCNN model: dump sample images and the loss curve.
    reloader = Reloader(model_path = 'trained_model/FSRCNN/FSRCNN.h5')
    reloader.reload()
    Reloader.lossShow('trained_model/FSRCNN/train_loss_arr.npy', 'trained_model/FSRCNN/valid_loss_arr.npy', 'trained_model/FSRCNN/loss_curve_fsrcnn')
| MuyaoYuan/FSRCNN-tf | reloader.py | reloader.py | py | 1,879 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models",
"line_number": 13,
"usage_type": "attribute"
},
{
"ap... |
19327625963 | from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from whatsnew.views import ChangelogView
# URL routes for the whatsnew app.
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10; newer Django expects a plain list of url()/path() entries.
urlpatterns = patterns(
    'whatsnew.views',
    url(r'^test/', TemplateView.as_view(template_name='whatsnew/test.html'),
        name='whatsnew-test'),
    url(r'^changelog/', ChangelogView.as_view(),
        name='whatsnew-changelog'),
    # url(r'^latest/$', 'latest', name='whatsnew-latest'),
)
| saxix/django-whatsnew | whatsnew/urls.py | urls.py | py | 432 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "django.conf.urls.patterns",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.views.generic.TemplateView.as_view",
"line_number": 8,
"usage_type": "call"
},
{
... |
42131468718 | import email
import csv
from fileinput import filename
from flask import Flask, render_template, url_for,request,redirect
app = Flask(__name__)
print(__name__)# is __main__ because this is main file we are running
#How to make these writing routes dynamic???
@app.route('/')
def my_home():# thats defaults
    """Serve the landing page (templates/index.html)."""
    return render_template('index.html')
@app.route('/<string:page_name>') # we want page name which should be of type string
def html_page(page_name): #give page_name as param
    """Serve any template by name (e.g. /about.html -> templates/about.html).

    NOTE(review): page_name comes straight from the URL; render_template
    confines lookups to the templates folder, but an unknown name raises
    TemplateNotFound (HTTP 500) — consider handling it explicitly.
    """
    return render_template(page_name)
# helper function that appends a form submission to a text file
def write_to_text(data):
    """Append one contact-form submission to database.txt.

    Format: newline + "email, subject, message".

    Fixes: the previous version bound the meaningless return value of
    write() to an unused variable and shadowed the module-level `email`
    import with a local; both removed.
    """
    with open('database.txt', mode='a') as database:
        database.write(f'\n{data["email"]}, {data["subject"]}, {data["message"]}')
def write_to_csv(data):
    """Append one contact-form submission to data.csv as a row: email,subject,message."""
    with open('data.csv', 'a', newline='') as csv_data:
        writer = csv.writer(csv_data, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        writer.writerow([data['email'], data['subject'], data['message']])
# text = request.form['text']
# processed_text = text.upper()
# message = request.form['message']
# f.write(str(form[message])
@app.route('/submit_form', methods=['POST', 'GET'])
def submit_form():
    """Persist a contact-form POST to both database.txt and data.csv.

    Fixes: the previous version read request.form['message']/['email']
    unconditionally (causing a 400 on GET or missing fields) and then
    discarded the values; it also printed user-submitted data and used a
    bare `except:`. The best-effort error message on save failure is kept.
    """
    if request.method == 'POST':
        try:
            data = request.form.to_dict()
            write_to_text(data)
            write_to_csv(data)
            return render_template('thankyou.html')
        except Exception:
            return 'something is not working, saving failed'
    return 'oops try again sth is wrong'
if __name__ == '__main__':
app.run(debug = True) | suprimregmi/Flask | Portfolio/app.py | app.py | py | 2,006 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "csv.writer",
... |
32812980636 | import yaml
import itertools
import random
from django.templatetags.static import static
import pandas as pd
from datetime import date
import json
def parse_yml(path):
    """Load a YAML file and return the parsed object.

    SECURITY NOTE(review): yaml.load without an explicit Loader can execute
    arbitrary Python via YAML tags; switch to yaml.safe_load unless
    full-YAML features are deliberately required.
    """
    with open(path, 'r', 1) as file_stream:  # buffering=1 (line-buffered; no effect on reads)
        yml_file = yaml.load(file_stream)
    return yml_file
def get_next_question_difficulty_level(question_history):
    """Stub: always returns difficulty level 0.

    The sorted history frame is computed but discarded — presumably
    groundwork for adaptive difficulty selection; TODO confirm intent.
    """
    question_history = pd.DataFrame.from_dict(question_history)
    question_history = question_history.sort_values(by=['question_difficulty_level', 'interpretable_arguments'])
    return 0
def get_available_img_name(img_type='frame'):
    """Return the image base names available for a given question type.

    Known types: 'frame', 'skip 1', 'skip n', 'groups' (a list of two
    sub-lists), and 'measure'. Unknown types yield None.
    """
    catalogue = {
        'frame': ['triangle', 'square', 'pentagon', 'octagon', 'circle'],
        'skip 1': ['apple', 'car', 'clock', 'diamond', 'hamburger', 'key', 'oval', 'pumpkin', 'triangle', 'bear', 'butterfly', 'chicken', 'deer', 'flower', 'girl', 'goat', 'goose', 'heart',
                   'horse', 'ox', 'police car', 'rooster', 'rose', 'sheep', 'squirrel', 'tidy bear', 'zebra'],
        'skip n': ['apples', 'cars', 'clocks', 'diamonds', 'hamburgers', 'keys', 'ovals', 'pumpkins', 'triangles', 'bears', 'butterflies', 'chicken', 'deer', 'flowers', 'girls', 'goats', 'geese', 'hearts',
                   'horses', 'oxen', 'police cars', 'roosters', 'roses', 'sheep', 'squirrels', 'tidy bears', 'zebras'],
        'groups': [['bear', 'butterfly', 'deer', 'goose', 'horse', 'ox', 'rooster', 'sheep', 'squirrel', 'zebra'],
                   ['apple', 'car', 'clock', 'diamond', 'hamburger', 'key', 'oval']],
        'measure': ['comb', 'pencil', 'scissors', 'forks', 'knife', 'eraser', 'crayon', 'spoon'],
    }
    return catalogue.get(img_type)
def check_user_answer(user_answers, standard_answers):
    """Grade a submission against the answer key.

    Returns (is_answered, is_fully_correct):
    - is_answered: at least one submitted value is a real answer, i.e. not
      one of the placeholder values 'false'/'False'/''/None/False;
    - is_fully_correct: every key in standard_answers scored a point.

    A point is earned either on an exact (stripped, string-compared) match,
    or when the user value is the empty string and the standard answer is
    'false' (unchecked-checkbox case). Keys missing from user_answers
    simply score no point.
    """
    total_points = len(standard_answers)
    earned_points = 0
    is_answered = False
    for k, v in standard_answers.items():
        if k in user_answers:
            user_answer = user_answers[k]
            if str(user_answer).strip() == str(v).strip() or (str(user_answer).lower()=='' and str(v).strip().lower()=='false'):
                earned_points = earned_points+1
            if user_answers[k] not in ['false', 'False', '', None, False]:
                is_answered = True
    return is_answered, earned_points == total_points
def convert_df_to_html(df, show_head=True, table_css=None, table_style=''):
    '''
    Render a DataFrame as a Bootstrap-styled HTML table.

    table_css: {'colname_rowindex': css} — per-cell attribute string; it may
    contain a '{value}' placeholder filled with the cell value by the
    second .format call below.

    NOTE(review): orient='rows' relies on pandas accepting the 'r'
    abbreviation for 'records'; newer pandas versions reject it — verify
    against the pinned pandas version.
    '''
    table_css = table_css or {}
    rows = df.to_dict(orient='rows')
    # Column keys are lowercased with spaces stripped to match table_css keys.
    columns = [c.lower().replace(' ','') for c in df.columns]
    thead = '''<thead>
    <tr style="text-align: center;">
    {}
    </tr>
    </thead>'''
    trs = [f'''<table class="table table-bordered table-responsive-md table-striped text-center"> {table_style}''']
    ths = []
    for col in columns:
        ths.append('<th>{}</th>'.format(col))
    if show_head:
        trs.append(thead.format('\n'.join(ths)))
    for i, r in enumerate(rows):
        tds = []
        for col in columns:
            css = table_css.get('{}_{}'.format(col, i))
            if css:
                # Two-stage format: first inject the css attributes, then
                # let the css string's own '{value}' placeholder consume the cell value.
                tds.append('<td {}></td>'.format(css).format(value=r[col]))
            else:
                tds.append('<td>{}</td>'.format(r[col]))
        trs.append('<tr>{}</tr>'.format('\n'.join(tds)))
    trs.append('</table>')
    question_table = '\n'.join(trs)
    return question_table
def get_pos_nums(num):
    """Pair each digit of `num` with its place-value label.

    Yields (digit, label) pairs from the most significant digit down,
    e.g. 123 -> (1, 'Hundreds +'), (2, 'Tens +'), (3, 'Ones').
    Returns an empty zip for num == 0.
    """
    digits = []
    labels = ['Millions + ', 'Hundreds of Thousands +', 'Tens of Thousands +', 'Thousands +', 'Hundreds +', 'Tens +', 'Ones']
    while num != 0:
        num, digit = divmod(num, 10)
        digits.append(digit)
    return zip(digits[::-1], labels[-len(digits):])
def create_table_html(image_names, image_counts, table_type='list'):
    """Build an HTML table of item icons for a counting question.

    table_type selects the layout: 'tally' (placeholder rows), 'count'
    (icon + numeric count), 'horizon_table' (items row over counts row),
    anything else renders each item's icons repeated `cnt` times, wrapped
    every 10 cells.

    NOTE(review): max(image_counts) raises on an empty list; items with
    cnt < 1 are skipped in the icon layout.
    """
    trs = []
    trs1 = []
    trs2 = []
    trs3 = []
    # Cap row width at 10 icon cells; shorter when every count is below 10.
    max_tds = 10 if max(image_counts) > 9 else max(image_counts)
    for img, cnt in zip(image_names, image_counts):
        if cnt < 1:
            continue
        tds = ['<tr><td rowspan="{}">{}</td>'.format((cnt-1)//10+1, img.title())]
        for i in range(cnt):
            tds.append('''<td><img src="{}" style='width:50px' alt=""></td>'''.format(static('akidzon_icons/items/{}.png'.format(img))))
            if (i+1) % 10 == 0 and i != 0:
                tds.append('</tr><tr>')
        # Pad the last partial row with empty cells up to max_tds.
        tds = tds + ["<td></td>"]*(max_tds-i % 10-1)
        tds.append('</tr>')
        trs.append('\n'.join(tds))
        trs1.append('''<tr><td><img src="{}" style='width:50px' alt=""></td><td style='width:100px' class='text-center'>{}</td></tr>'''.format(static('akidzon_icons/items/{}.png'.format(img)), cnt))
        # trs2.append('''<tr><td><img src="{}" style='width:50px' alt=""></td><td><img src="{}" style='height:50px' alt=""></td></tr>'''.format(static('akidzon_icons/items/{}.png'.format(img)),
        # static('akidzon_icons/counting_index/countingstick_{}.png'.format(cnt))))
        trs2.append('''<tr><td></td></tr>''')
    if table_type == 'tally':
        correct_graph_html = '''<table border="1" style='background: transparent'><tr><td class='text-center'>Item</td><td class='text-center'>Count</td></tr>{}</table>'''.format('\n'.join(trs2))
    elif table_type == 'count':
        correct_graph_html = '''<table border="1" style='background: transparent'><tr><td class='text-center'>Item</td><td class='text-center'>Count</td></tr>{}</table>'''.format('\n'.join(trs1))
    elif table_type == 'horizon_table':
        tds = ['''<td><img src="{}" style='height:50px' alt=""></td>'''.format(static('akidzon_icons/items/{}.png'.format(img))) for img in image_names]
        trs3 = ['''<tr><td style='width:100px'>Items</td>{}</tr>'''.format(''.join(tds))]
        tds = ['''<td class='text-center'>{}</td>'''.format(cnt) for cnt in image_counts]
        trs3.append('''<tr><td style='width:100px'>How many?</td>{}</tr>'''.format(''.join(tds)))
        correct_graph_html = '''<table border="1" style='background: transparent'>{}</table>'''.format('\n'.join(trs3))
    else:
        correct_graph_html = '''<table border="1" style='background: transparent'>{}</table>'''.format('\n'.join(trs))
    return correct_graph_html
def add_limit_factors(max_total, number_of_factors=2, min_factor_value=1, min_total=0):
all_options = [range(min_factor_value, max_total+1) for i in range(number_of_factors)]
factors = [x for x in itertools.product(*all_options) if sum(x) <= max_total and sum(x) > min_total]
return factors
def make_a_wrong_result(factor1, factor2, operation='+'):
    """Pick a random plausible-but-wrong result for factor1 <op> factor2.

    '+': any value in [|f1-f2|, f1+f2+4] except the true sum.
    '-': any value in [max(0, f1-f2-5), f1+4] except the true difference.
    Implicitly returns None for unsupported operations.
    """
    if operation == '+':
        return random.choice([x for x in range(abs(factor1-factor2), factor1+factor2+5) if x != factor1+factor2])
    elif operation == '-':
        return random.choice([x for x in range(max(0, factor1-factor2-5), factor1+5) if x != factor1-factor2])
def make_a_formula(target=None, operation='+', is_correct=True, is_formula=False, is_full_formula=False):
    """Build a random addition/subtraction fact for `target` (default 1..20).

    Returns either the (factor1, factor2) pair, the formula string
    (is_formula), or the full 'a<op>b=target' string (is_full_formula).
    For display, '-' is replaced by the en dash '–'.

    is_correct=False yields factors whose result differs from target.
    NOTE(review): the '+' incorrect branch filters with abs(x1-x2-target)
    < 4 — the subtraction looks like a typo for x1+x2-target; confirm the
    intended "close but wrong" behavior.
    """
    target = target or random.randint(1, 20)
    if operation == '+':
        if is_correct:
            factor1, factor2 = random.choice([(x1, x2) for x1 in range(target+1) for x2 in range(target+1) if x1+x2 == target])
        else:
            factor1, factor2 = random.choice([(x1, x2) for x1 in range(target+1) for x2 in range(target+1) if x1+x2 != target and abs(x1-x2-target) < 4])
    elif operation == '-':
        operation = '–'
        if is_correct:
            factor1, factor2 = random.choice([(x1, x2) for x1 in range(target+21) for x2 in range(target+1) if x1-x2 == target])
        else:
            factor1, factor2 = random.choice([(x1, x2) for x1 in range(target+21) for x2 in range(target+1) if x1-x2 != target and abs(x1-x2-target) < 4])
    if is_formula:
        formula = '{}{}{}'.format(factor1, operation, factor2)
        if is_full_formula:
            return '{}={}'.format(formula, target)
        else:
            return formula
    return factor1, factor2
def flatten_list(l):
    """Concatenate a list of lists into a single flat list.

    Replaces the previous `a = a + t` accumulation, which rebuilt the
    whole accumulator on every step (accidentally quadratic), with a
    single linear pass. Also accepts any iterable elements, not just
    lists.
    """
    return [item for sublist in l for item in sublist]
def get_word_expression(factor1, factor2, operation='+'):
    """Render 'factor1 <op> factor2 equals result' as a phrase.

    The operator and equals words are chosen randomly from small pools;
    with probability 0.6 the numbers themselves are spelled out via
    get_word_from_digits. For '-' an alternative 'X is subtracted from Y'
    phrasing may be chosen. Only '+' and '-' are supported (anything else
    returns None implicitly).
    """
    equals = ['equals', 'is', 'are']
    if operation == '+':
        plus = ['add', 'plus', 'and']
        result = factor1+factor2
    if operation == '-':
        plus = ['minus', 'subtract', 'take away']
        result = factor1-factor2
    # 6-in-10 chance of word form instead of digits.
    if random.randint(1, 10) > 4:
        factor1 = get_word_from_digits(factor1)
        factor2 = get_word_from_digits(factor2)
        result = get_word_from_digits(result)
    if operation == '+':
        return '{} {} {} {} {}'.format(factor1, random.choice(plus), factor2, random.choice(equals), result)
    elif operation == '-':
        return random.choice(['{} {} {} {} {}'.format(factor1, random.choice(plus), factor2, random.choice(equals), result), '{} is subtracted from {} {} {}'.format(factor2, factor1, random.choice(equals), result)])
def get_dict_object_from_request(request, keyword):
    """Collect 'keyword[field]' entries of a request mapping into {'field': value}.

    Only keys shaped like 'keyword[...]' are included; the prefix and every
    ']' character are stripped from the key.
    """
    prefix = '{}['.format(keyword)
    return {
        key.replace(prefix, '').replace(']', ''): value
        for key, value in request.items()
        if key.startswith(prefix) and key.endswith(']')
    }
def make_a_number(number, standard=True):
    """Split `number` into a (tens, ones) pair.

    standard=True returns the plain place-value split (37 -> (3, 7)).
    standard=False regroups randomly, moving 1..tens whole tens into the
    ones place (37 -> e.g. (1, 27)); the pair always still sums back to
    the original number.

    Fix: the previous version called random.randint(1, tens) even when
    tens == 0 (numbers below 10 with standard=False), raising ValueError;
    such numbers now regroup to themselves.
    """
    tens, ones = divmod(number, 10)
    if standard:
        return tens, ones
    # Guard: randint(1, 0) would raise for numbers < 10.
    shift = random.randint(1, tens) if tens > 0 else 0
    return tens - shift, ones + 10 * shift
def get_word_from_digits(n):
    """Spell out an integer in English words (0 .. 999,999,999).

    Fixes over the previous version:
    - 'Fourty' -> 'Forty';
    - six-digit numbers (100,000-999,999) were routed into the million
      branch (producing 'Zero Million ...' with a wrong remainder); the
      thousands branch now covers up to 999,999 and the millions branch
      uses // 1,000,000 and % 1,000,000 consistently;
    - no more stray trailing spaces when a lower group is zero.
    Values above 999,999,999 return '' as before.
    """
    units = ["Zero", "One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine"]
    teens = ["Ten", "Eleven", "Twelve", "Thirteen", "Fourteen", "Fifteen", "Sixteen", "Seventeen", "Eighteen", "Nineteen"]
    tens = ["Twenty", "Thirty", "Forty", "Fifty", "Sixty", "Seventy", "Eighty", "Ninety"]
    if n <= 9:
        return units[n]
    if n <= 19:
        return teens[n - 10]
    if n <= 99:
        word = tens[(n // 10) - 2]
        return word + " " + units[n % 10] if n % 10 else word
    if n <= 999:
        word = get_word_from_digits(n // 100) + " Hundred"
        return word + " " + get_word_from_digits(n % 100) if n % 100 else word
    if n <= 999999:
        word = get_word_from_digits(n // 1000) + " Thousand"
        return word + " " + get_word_from_digits(n % 1000) if n % 1000 else word
    if n <= 999999999:
        word = get_word_from_digits(n // 1000000) + " Million"
        return word + " " + get_word_from_digits(n % 1000000) if n % 1000000 else word
    return ''
| pkuaaron/akidzon_xblock | akidzon/utils.py | utils.py | py | 10,498 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "yaml.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "django.tem... |
24032855686 | from os import path
import random
import string
from cgi import FieldStorage
import onlinelux
STORAGE_PATH = path.abspath(path.join(path.dirname(onlinelux.__file__), 'public', 'storage'))
class StorageManager:
    """Images Storage: persists uploaded files under random names in STORAGE_PATH."""
    def __init__(self):
        # Absolute path to public/storage inside the onlinelux package.
        self.storage_path = STORAGE_PATH
    def store(self, data: FieldStorage):
        """Persist an uploaded file under a random 15-char name; return the filename.

        NOTE(review): `cgi.FieldStorage` is deprecated since Python 3.11 and
        removed in 3.13 — plan a replacement.
        """
        extension = self.tell_extension(data)
        filename = self.id_generator(extension, size=15)
        destination = self._specify_path(filename)
        with open(destination, mode='wb') as dest:
            content = data.file.read()
            dest.write(content)
        return filename
    def _specify_path(self, filename):
        # '/'-joined literally (not os.path.join); presumably a POSIX-only
        # deployment — confirm before running on Windows.
        return '{}/{}'.format(self.storage_path, filename)
    @staticmethod
    def tell_extension(file: FieldStorage):
        # Everything after the last '.'; a dot-less name returns the whole name.
        return file.filename.split('.')[-1]
    @staticmethod
    def id_generator(ext, size=6, chars=string.ascii_lowercase + string.digits):
        """Random '<uid>.<ext>' filename.

        SECURITY NOTE(review): uses `random`, which is not cryptographically
        secure; prefer `secrets.choice` if filenames must be unguessable.
        """
        uid = ''.join(random.choice(chars) for _ in range(size))
        return '{}.{}'.format(uid, ext)
| eteamin/onlineluxe | onlinelux/lib/store.py | store.py | py | 1,065 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.abspath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_numb... |
14815941904 | import pysam
import numpy as np
import cv2
from PIL import Image
from multiprocessing import Process, Queue
import random
import glob
class Query:
    """Per-read container produced by making_query_dict.

    Holds the CIGAR expansion string, the padded/trimmed query, reference
    and quality strings (all aligned to the pileup window), the mapping
    quality, the per-base reference positions, and an indent (always 0 in
    the current code path).
    """
    def __init__(self,name,cigar,query,qual,mapq,position,indent,ref):
        self.name=name
        self.ref=ref
        self.cigar=cigar
        self.query=query
        self.qual=qual
        self.mapq=mapq
        self.position=position
        self.indent=indent
def making_query_dict(path,pos_chr,pos_start,insertion_length=0):
    """Collect the reads overlapping pos_chr:pos_start from a BAM file.

    Walks pysam pileup columns in a +/-32 bp window (term) around
    pos_start. For each read seen once, expands its CIGAR into a per-base
    string (M/S/H/I/D), pads the query/reference/quality strings so they
    align to the leftmost reference position of the first read seen
    ("veryfirst"), and trims them to a 64 bp (term*2) window.

    Returns [query_dict, insertion_dict, veryfirst] where
    - query_dict maps read name -> Query,
    - insertion_dict maps read name -> window-relative insertion offset
      (entries outside [start_index, start_index+30] are dropped),
    - veryfirst is the reference start of the first read processed.

    NOTE(review): `insertion_length` and the initial `pileupdata` result
    are unused; the samfile handle is never closed.
    """
    term=32
    samfile = pysam.AlignmentFile(path, "rb" )
    pileupdata=samfile.pileup(pos_chr, pos_start, pos_start+1)
    insertion_dict={}
    query_dict={}
    veryfirst=0
    start_index=0
    for pileupcolumn in samfile.pileup(pos_chr, pos_start-term, pos_start+1+term):
        if(pileupcolumn.pos>=pos_start-term and pileupcolumn.pos<pos_start+1+term):
            for pileupread in pileupcolumn.pileups:
                if(pileupread.alignment.query_name not in query_dict.keys()):
                    isinsertion=0
                    cigar=pileupread.alignment.cigartuples
                    cigar_info=''
                    cigar_pos=0
                    query_sequence=pileupread.alignment.query_sequence
                    position=pileupread.alignment.get_reference_positions()
                    if(veryfirst==0):veryfirst=position[0]
                    first_pos=0
                    softclip=0
                    # Expand the CIGAR tuples into one letter per base and patch
                    # the position list so soft clips and deletions keep alignment.
                    for i,e in enumerate(cigar):
                        if(e[0]==0):
                            if(first_pos==0):first_pos=position[0]
                            cigar_info=cigar_info+'M'*e[1]
                            cigar_pos=cigar_pos+e[1]
                        elif(e[0]==4):
                            softclip=e[1]
                            if(first_pos==0):
                                first_pos=position[0]-e[1]
                                for sftidx in range(0,e[1]):position=[first_pos+sftidx]+position
                            elif(cigar_pos>=len(position)):
                                position=position+[position[-1]+1]
                            else:
                                for sftidx in range(0,e[1]):position=position[0:cigar_pos]+[position[cigar_pos]+1]+position[cigar_pos:len(position)]
                            cigar_info=cigar_info+'S'*e[1]
                            cigar_pos=cigar_pos+e[1]
                        elif(e[0]==5):
                            cigar_info=cigar_info+'H'*e[1]
                            cigar_pos=cigar_pos+e[1]
                        elif(e[0]==1):
                            cigar_info=cigar_info+'I'*e[1]
                            if(position[0]+cigar_pos - veryfirst not in insertion_dict):insertion_dict[pileupread.alignment.query_name]=position[0]+cigar_pos-veryfirst
                            cigar_pos=cigar_pos+1
                            isinsertion=1
                        elif(e[0]==2):
                            cigar_info=cigar_info+'D'*e[1]
                            query_sequence=query_sequence[0:cigar_pos]+'D'*e[1]+query_sequence[cigar_pos:len(query_sequence)]
                            position=position[0:cigar_pos]+[0]*e[1]+position[cigar_pos:len(query_sequence)]
                            cigar_pos=cigar_pos+e[1]
                    indent=0
                    query=''
                    start_softclip,end_softclip=0,0
                    start_deletion,end_deletion=0,0
                    ref=pileupread.alignment.get_reference_sequence()
                    qual=str(pileupread.alignment.qual)
                    # Rewrite the query: soft clips become 'S', insertions are
                    # skipped, deletion bases get a blank quality slot.
                    for q in range(0,len(query_sequence)):
                        nt=query_sequence[q]
                        if(cigar_info[q]=='S'):
                            query+='S'
                            if(q==start_softclip):start_softclip+=1
                            else:end_softclip+=1
                        elif(cigar_info[q]=='I'):continue
                        elif(cigar_info[q]=='D'):
                            query+=nt
                            qual=qual[0:q]+' '+qual[q:len(qual)]
                        else:query+=nt
                    ref=' '*(start_softclip)+ref+' '*(end_softclip)
                    # Left-pad or left-trim everything so strings share origin veryfirst.
                    if(first_pos-veryfirst>=0):
                        query=' '*(first_pos-veryfirst)+query
                        ref=' '*(first_pos-veryfirst)+ref
                        qual=' '*(first_pos-veryfirst)+qual
                        for i in range(first_pos-veryfirst,0):position=[position[0]-1]+position
                    else:
                        query=query[veryfirst-first_pos:len(query)]
                        ref=ref[veryfirst-first_pos:len(ref)]
                        qual=qual[veryfirst-first_pos:len(qual)]
                        position=position[veryfirst-first_pos:len(position)]
                    # start_index is fixed once, from the first read that covers the site.
                    if(start_index==0):
                        if(pos_start not in position):start_index=position.index(pos_start-term)
                        else:start_index=position.index(pos_start)-term
                    qual=qual[start_index:start_index+term*2]
                    query=query[start_index:start_index+term*2]
                    ref=ref[start_index:start_index+term*2]
                    # print(query)
                    if(isinsertion==1):
                        if(insertion_dict[pileupread.alignment.query_name]<start_index or insertion_dict[pileupread.alignment.query_name]>start_index+30):del(insertion_dict[pileupread.alignment.query_name])
                        else:insertion_dict[pileupread.alignment.query_name]=insertion_dict[pileupread.alignment.query_name]-start_index
                    query_dict[pileupread.alignment.query_name]=Query(pileupread.alignment.query_name,cigar_info,query,qual,pileupread.alignment.mapq,position,indent,ref)
    return([query_dict,insertion_dict,veryfirst])
def making_bam_array(query_dict,insertion_dict):
    """Render aligned reads into a 64x64x3 image-like array.

    Channel 0 encodes the read base at each column and channel 1 the
    reference base, via the fixed scoreDict colour map; channel 2 stays 0.
    Before drawing, every recorded insertion breakpoint is spliced into
    each read's query/ref/qual strings ('I' for the inserting read, '*'
    padding otherwise, ' ' outside the read). The Query objects are
    mutated in place.

    NOTE(review): scoreDict lists 'D' twice (230 then 10); the later 10
    wins, so 230 is dead. More than 64 reads would overflow readct —
    confirm upstream bounds.
    """
    bam_array = np.zeros([64,64,3])
    scoreDict = {'A':110, 'T': 140, 'G': 170, 'C': 200, 'D' : 230, ' ': 0, 'S':20, 'N':100, '*' : 100, "I" : 80 , "D" : 10}
    readct = 0
    for read in query_dict.keys():
        for breakpoint in list(set(insertion_dict.values())):
            if(breakpoint<len(query_dict[read].query) and query_dict[read].query[breakpoint]!=' '):
                if(read in insertion_dict.keys()):
                    if(insertion_dict[read]==breakpoint):
                        query_dict[read].query=query_dict[read].query[0:breakpoint]+'I'+query_dict[read].query[breakpoint:len(query_dict[read].query)]
                        query_dict[read].ref=query_dict[read].ref[0:breakpoint]+'*'+query_dict[read].ref[breakpoint:len(query_dict[read].ref)]
                        query_dict[read].qual=query_dict[read].qual[0:breakpoint]+' '+query_dict[read].qual[breakpoint:len(query_dict[read].qual)]
                    else:
                        query_dict[read].query=query_dict[read].query[0:breakpoint]+'*'+query_dict[read].query[breakpoint:len(query_dict[read].query)]
                        query_dict[read].ref=query_dict[read].ref[0:breakpoint]+'*'+query_dict[read].ref[breakpoint:len(query_dict[read].ref)]
                        query_dict[read].qual=query_dict[read].qual[0:breakpoint]+' '+query_dict[read].qual[breakpoint:len(query_dict[read].qual)]
                else:
                    query_dict[read].query=query_dict[read].query[0:breakpoint]+'*'+query_dict[read].query[breakpoint:len(query_dict[read].query)]
                    query_dict[read].ref=query_dict[read].ref[0:breakpoint]+'*'+query_dict[read].ref[breakpoint:len(query_dict[read].ref)]
                    query_dict[read].qual=query_dict[read].qual[0:breakpoint]+' '+query_dict[read].qual[breakpoint:len(query_dict[read].qual)]
            else:
                query_dict[read].query=query_dict[read].query[0:breakpoint]+' '+query_dict[read].query[breakpoint:len(query_dict[read].query)]
                query_dict[read].ref=query_dict[read].ref[0:breakpoint]+' '+query_dict[read].ref[breakpoint:len(query_dict[read].ref)]
                query_dict[read].qual=query_dict[read].qual[0:breakpoint]+' '+query_dict[read].qual[breakpoint:len(query_dict[read].qual)]
    # One image row per read; columns are window positions.
    for read in query_dict.keys():
        for pos in range(0, len(query_dict[read].query) ):
            bam_array[readct, pos,0] = scoreDict[query_dict[read].query[pos].upper()]
            bam_array[readct, pos,1] = scoreDict[query_dict[read].ref[pos].upper()]
        readct += 1
    return bam_array
def work(id, sliced_var_list, result):
    """Worker: render trio pileup images for a slice of variant records.

    Each entry of sliced_var_list is [n_samples, proband_bam, father_bam,
    mother_bam, (chrom, pos)]; n_samples (2/20/50) selects both the number
    of random window offsets and the output class directory (snv/del/ins).
    One PNG per family member is written per sampled offset. ValueError /
    IndexError from malformed records skip to the next record.

    NOTE(review): the `result` queue is accepted but never written to, and
    the hard-coded /home/sylee/... output root must exist beforehand.
    """
    for term in sliced_var_list :
        try:
            random_var = random.sample(list(range(0,64)),term[0])
            if(term[0] == 2): output_path = "/home/sylee/GOTCHA/training/" + "snv"
            elif(term[0] == 20): output_path = "/home/sylee/GOTCHA/training/" + "del"
            elif(term[0] == 50): output_path = "/home/sylee/GOTCHA/training/" + "ins"
            for r in random_var:
                pos_start = int(term[-1][1]) + r
                print(term[4])
                mother= making_query_dict(term[3],term[-1][0],pos_start)
                father= making_query_dict(term[2],term[-1][0],pos_start)
                proband= making_query_dict(term[1],term[-1][0],pos_start)
                # Merge all insertion breakpoints so the three images stay column-aligned.
                family_dict={**mother[1],**father[1],**proband[1]}
                print(mother[2],father[2],proband[2])
                mother_array=making_bam_array(mother[0],family_dict)
                proband_array=making_bam_array(proband[0],family_dict)
                father_array=making_bam_array(father[0],family_dict)
                cv2.imwrite(output_path + "/father/%s_s.%d_vp.%s"%(term[-1][0],pos_start,term[-1][1]) + ".png",father_array)
                cv2.imwrite(output_path + "/mother/%s_s.%d_vp.%s"%(term[-1][0],pos_start,term[-1][1]) + ".png",mother_array)
                cv2.imwrite(output_path + "/proband/%s_s.%d_vp.%s"%(term[-1][0],pos_start,term[-1][1]) + ".png",proband_array)
        except ValueError:
            continue
        except IndexError:
            continue
    return id
class inputData:
    """Trio sample paths plus lazily loaded variant tables.

    NOTE(review): this class is not referenced by the __main__ block in
    this file — confirm whether it is still used elsewhere.
    """
    def __init__(self, proband, father, mother, bam_path):
        self.proband = proband
        self.father = father
        self.mother = mother
        self.bam_path = bam_path
        self.snv_ti = ''
        self.ins = ''
        self.snv_tv = ''
        self.deletion = ''
    def show_info(self):
        """Print the three sample identifiers and the BAM path."""
        print(self.proband, self.father, self.mother, self.bam_path)
    def path_match(self, variant_file_list):
        """Load each variant table into the matching attribute by filename token.

        Matching is first-token-wins per file: Ti -> snv_ti, Tv -> snv_tv,
        DEL -> deletion, INS -> ins. File handles are opened without being
        closed explicitly (CPython closes them on GC).
        """
        for file in variant_file_list:
            if "Ti" in file : self.snv_ti = open(file,'r').readlines()
            elif "Tv" in file : self.snv_tv = open(file,'r').readlines()
            elif "DEL" in file : self.deletion = open(file,'r').readlines()
            elif "INS" in file : self.ins = open(file,'r').readlines()
if __name__ == "__main__":
    print("###########################################################################")
    # Dataset roots to scan for trio BAMs, pedigree files and dSNV/dINDEL tables.
    table_list = ["/EXTDATA/mhchoi/Denovo_mutation_project/Control_trio_vs_ABS_Analysis/Directly_blood_draw/2020/2nd" ,
                  "/EXTDATA/mhchoi/Denovo_mutation_project/Control_trio_vs_ABS_Analysis/Directly_blood_draw/2021/1st",
                  "/EXTDATA/mhchoi/Denovo_mutation_project/Control_trio_vs_ABS_Analysis/RareDisease_dataSet",
                  "/EXTDATA/mhchoi/Denovo_mutation_project/Atomic_Bomb_Survivors_Analysis/2021"]
    family_dnmfile_dict = dict()  # NOTE(review): populated nowhere; dead variable
    var_list = []
    for path in table_list:
        for specified_path in glob.glob(path + '/*'):
            bamlist = glob.glob(specified_path + "/**/*.final.bam", recursive = True)
            for more_specified_path in glob.glob(specified_path +'/*.T*'):
                # Each dataset layout keeps the TRUE tables in a different subfolder.
                if 'UNIST' in more_specified_path :
                    tag = '_'.join(more_specified_path.split('/')[-3:])
                    more_specified_path = more_specified_path + '/1.Processing'
                    variant_file_list = glob.glob(more_specified_path + '/dSNV_dINDEL_final_table/*TRUE.table' )
                elif '2021' in more_specified_path:
                    tag = '_'.join(more_specified_path.split('/')[-4:])
                    variant_file_list = glob.glob(more_specified_path + "/dSNV_dINDEL_final_table/*TRUE.table")
                else :
                    tag = '_'.join(more_specified_path.split('/')[-4:])
                    variant_file_list = glob.glob(more_specified_path + '/Reproducibility_test/dSNV_dINDEL_final_table/*TRUE.table' )
                #print(variant_file_list)
                # Last pedigree line: columns 1-3 are proband, father, mother IDs.
                ped = open(glob.glob(more_specified_path + '/*.ped')[0], 'r').readlines()[-1].split('\t')
                mom_path = ""
                dad_path = ""
                proband_path = ""
                if(len(bamlist) != 0) :
                    for path in bamlist:
                        if ped[1] in path: proband_path = path
                        elif ped[2] in path : dad_path = path
                        elif ped[3] in path : mom_path = path
                # First element doubles as class label and samples-per-variant count.
                for var in variant_file_list:
                    file = open(var,'r').readlines()
                    if ".dSNV" in var :
                        for line in file:
                            var_list.append([2, proband_path, dad_path, mom_path, line.split('\t')[0:2]])
                    elif ".dDEL" in var :
                        for line in file:
                            var_list.append([20, proband_path, dad_path, mom_path, line.split('\t')[0:2]])
                    elif ".dINS" in var :
                        for line in file:
                            var_list.append([50, proband_path, dad_path, mom_path, line.split('\t')[0:2]])
    print(len([x for x in var_list if x[0] == 2]))
    print(len([x for x in var_list if x[0] == 20]))
    print(len([x for x in var_list if x[0] == 50]))
    result=Queue()
    procs=[]
    START,END=0,len(var_list)
    # NOTE(review): integer division means the last END % 10 variants are
    # never assigned to any worker slice.
    interval = END // 10
    for term in range(0,10):
        print(interval * term)
        procs.append(Process(target=work, args=(term, var_list[interval * term : interval * (term + 1)], result)))
        procs[-1].start()
    for q in range(0,10):
        procs[q].join()
| sylee623/GOTCHA | src/training_making/bam_array_generator_for_train.py | bam_array_generator_for_train.py | py | 14,268 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pysam.AlignmentFile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"li... |
11603680922 | '''
model training pseudocode:
load dataset
transform dataset
for 100 iterations:
split dataset into 3 equal parts
assign trainingset 2/3 parts
assign testingset 1/3 part
find size n of trainingset
#perform leave-one_out cross-validation
For n iterations:
assign validationset unique 1/n part (i.e. not selected previously)
train model on remaining parts using cross-validation
validate model on validationset
determine model score
determine average model score
'''
import pandas as pd
from sklearn import svm, linear_model
from sklearn.model_selection import cross_validate, KFold
def prepare_dataset(filename):
    """Load the call table and reshape it so samples are rows.

    The Chromosome/Start/End columns are fused into a single
    'chr_start_end' identifier, bookkeeping columns are dropped, and the
    frame is transposed so each sample's calls form one row.
    """
    frame = pd.read_csv(filename, sep='\t')
    frame['Chromosome'] = [
        "%d_%d_%d" % (chrom, start, end)
        for chrom, start, end in zip(frame['Chromosome'], frame['Start'], frame['End'])
    ]
    frame = frame.rename(columns={'Chromosome': 'Sample'})
    frame = frame.drop(['Start', 'End', 'Nclone'], axis=1)
    frame = frame.T
    # Promote the fused-identifier row to column headers, then drop it.
    frame.columns = frame.iloc[0]
    return frame[1:]
def prepare_target(filename):
    """Load the clinical labels table, indexed by sample name."""
    return pd.read_csv(filename, sep='\t').set_index('Sample')
if __name__ == "__main__":
    # Evaluate a linear SVM on the chromosome-call features with 10-fold
    # cross-validation; labels come from the clinical 'Subgroup' column.
    dataset = prepare_dataset('data/train_call.txt')
    target = prepare_target('data/train_clinical.txt')
    # NOTE(review): gamma has no effect with the linear kernel.
    classifier = svm.SVC(kernel='linear', gamma='auto')
    cv = KFold(n_splits=10)
    result = cross_validate(classifier, dataset, target["Subgroup"], cv=cv)
    print(result)
    print("mean score: ", result['test_score'].mean())
| theohal/BfTM | sarah/assignment_slp_v1.py | assignment_slp_v1.py | py | 1,831 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"li... |
11960533630 | import pickle
import cv2
import dlib
import os
import numpy as np
import base64
# initialize dlib's face detector (HOG-based) and then create the facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('models/shape_predictor_68_face_landmarks.dat')
# load the face recognition model
face_recognition_model = dlib.face_recognition_model_v1('models/dlib_face_recognition_resnet_model_v1.dat')
def detect_face_rects(image):
    """Run dlib's HOG face detector on an image; 1 = one upsampling pass."""
    return detector(image, 1)
def detect_face_landmarks(image, rect):
    """Predict the 68 facial landmarks inside the given face rectangle."""
    return predictor(image, rect)
# compute the similarity between two faces
def calc_similarity(encodings1, encoding2):
    """Similarity score: 1 minus the Euclidean distance between the encodings."""
    distance = np.linalg.norm(encodings1 - encoding2)
    return 1 - distance
# test whether two faces are similar
def similar(encodings1, encoding2, threshold=0.4):
    """True when the two encodings are close enough (similarity >= threshold)."""
    score = calc_similarity(encodings1, encoding2)
    return score >= threshold
ROOT_DIR = 'photos'
class Picture:
    """One image plus the dlib face rectangles and encodings found in it."""
    def __init__(self, path, encodings, faces, width, height):
        self.path = path
        self.encodings = encodings
        self.faces = faces
        self.width = width
        self.height = height
    def __str__(self):
        return self.path
    def to_dict(self):
        # JSON-friendly form; each face serialized as [left, top, width, height].
        return { 'path': self.path, 'width': self.width, 'height': self.height, 'faces': [[r.left(), r.top(), r.width(), r.height()] for r in self.faces] }
    @staticmethod
    def from_image(image, path=None):
        """Detect faces in an image and compute their encodings (1 jitter pass)."""
        rects = detect_face_rects(image)
        encodings = [np.array(face_recognition_model.compute_face_descriptor(image, detect_face_landmarks(image, rect), 1)) for rect in rects]
        return Picture(path, encodings, rects, image.shape[1], image.shape[0])
    @staticmethod
    def from_file(path, max_pixels=1280*720):
        """Load an image file, downscaling it to at most max_pixels pixels first."""
        image = cv2.imread(path)
        if max_pixels != None and image.shape[0] * image.shape[1] > max_pixels:
            scale_factor = np.sqrt(max_pixels / (image.shape[0] * image.shape[1]))
            image = cv2.resize(image, (0, 0), fx=scale_factor, fy=scale_factor)
        return Picture.from_image(image, path)
    @staticmethod
    def from_base64(base64pic, path=None):
        # TODO: does not work :/  (imdecode flag 0 decodes grayscale; the
        # recognition model presumably expects a 3-channel image — verify)
        #image = imread(io.BytesIO(base64.b64decode(base64pic)))
        #image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        decode = base64.b64decode(base64pic)
        print(type(decode))
        arr = np.frombuffer(decode, np.uint8)
        print(type(arr))
        image = cv2.imdecode(arr, 0)
        print(type(image))
        return Picture.from_image(image, path)
class Album:
    """A named photo directory whose analyzed Pictures are cached in a pickle."""
    def __init__(self, name):
        self.name = name
        self.pictures = []
        if os.path.isfile(ROOT_DIR + os.sep + self.name + '.pickle'):
            # if the pickle cache exists, load it
            self.load()
        else:
            # otherwise build (and save) it
            self.update()
    def save(self):
        """Dump every Picture of the album to <ROOT_DIR>/<name>.pickle."""
        output = open(ROOT_DIR + os.sep + self.name + '.pickle', 'wb')
        # one pickle record per picture
        for picture in self.pictures:
            pickle.dump(picture, output)
        output.close()
    def load(self):
        """Read pickled Pictures back until EOF.

        SECURITY NOTE(review): pickle.load executes arbitrary code from the
        file — only safe for caches this process wrote itself.
        """
        with open(ROOT_DIR + os.sep + self.name + '.pickle', 'rb') as f:
            try:
                self.pictures = []
                while True:
                    package = pickle.load(f)
                    self.pictures.append(package)
            except EOFError:
                pass
    def update(self):
        """Re-analyze every image file in the album directory and refresh the cache."""
        print('updating album', self.name)
        self.pictures = []
        # walk every image of the directory
        for image_name in os.listdir(ROOT_DIR + os.sep + self.name):
            print('\t', image_name)
            self.pictures.append(Picture.from_file(ROOT_DIR + os.sep + self.name + os.sep + image_name))
        self.save()
    def get_search_by_face(self, faceEncoding):
        """Rank album pictures by their best per-face similarity to faceEncoding.

        Returns [{'id': picture_index, 'faces': [{'id', 'similarity'}, ...]}, ...]
        sorted best-match first; pictures with no faces sort as similarity 0.
        """
        result_pictures = []
        for id, picture in enumerate(self.pictures):
            faces = []
            for eId, e in enumerate(picture.encodings):
                faces.append({ 'id': eId, 'similarity': calc_similarity([ faceEncoding ], e) })
            result_pictures.append({ 'id': id, 'faces': faces })
        result_pictures.sort(key=lambda p: -max((f['similarity'] for f in p['faces']), default=0))
        return result_pictures
    def to_dict(self):
        return { 'name': self.name, 'pictures': [ p.to_dict() for p in self.pictures ] }
def get_albums():
    """Return a dict mapping directory name -> Album for every directory in ROOT_DIR."""
    return {
        entry.name: Album(entry.name)
        for entry in os.scandir(ROOT_DIR)
        if entry.is_dir()
    }
if __name__ == '__main__':
    # Running this module directly pre-builds (or refreshes) every album cache.
    get_albums()
{
"api_name": "dlib.get_frontal_face_detector",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "dlib.shape_predictor",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "dlib.face_recognition_model_v1",
"line_number": 13,
"usage_type": "call"
},
{
... |
28825101443 | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 3 23:28:26 2016
@author: Guanhao Wu
"""
from Neuron import Neuron
from NeuronParameters import NeuronParameters
import matplotlib.pyplot as plt
import numpy as np

# Simulate one regular-spiking (RS) neuron under a constant current clamp
# and plot its membrane potential over time.

T=60000  # number of simulation timesteps (each of width N.dt)
V=[]     # membrane-potential trace, one sample per timestep
N=Neuron()
P=NeuronParameters()
P.set_RS(N)  # configure N with regular-spiking parameters
for t in range(T):
    if t>50:
        N.clamp_input(85)  # constant input current after a short settling period
    N.timestep()
    V.append(N.V)
plt.figure()
# x axis in simulated time units (T steps of length N.dt)
plt.plot(np.linspace(0,T*N.dt,T),V,'r')
plt.show()
| higgamo/PNN | SingleNeuron.py | SingleNeuron.py | py | 471 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "Neuron.Neuron",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "NeuronParameters.NeuronParameters",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 26,
"usage_type": "call"
},
{
"api_name... |
30579027002 | import datetime
from cryptography.x509 import *
from cryptography.x509 import OID_COMMON_NAME
from cryptography.hazmat.primitives import serialization
from certificate_authority.database import db
from certificate_authority.models import *
from os import urandom
from utils.signing import get_default_encryption_padding, get_default_hash
def issue_certificate(csr: CertificateSigningRequest) -> Certificate:
    """Sign *csr* with the CA's private key and return the issued certificate.

    The CA key and certificate are loaded from on-disk PEM assets.  The
    issued certificate carries the CSR's subject/extensions, is valid for
    roughly one year, and is recorded (PEM-encoded) in the
    IssuedCertificate table under the subject's common name.
    """
    with open("certificate_authority/assets/key.pem", "rb") as file:
        key = serialization.load_pem_private_key(file.read(), None)
    with open("certificate_authority/assets/certificate.pem", "rb") as file:
        cert = load_pem_x509_certificate(file.read())
    cert = (CertificateBuilder(extensions=csr.extensions)
            .subject_name(csr.subject)
            .issuer_name(cert.issuer)  # issue under the CA's own subject name
            .public_key(csr.public_key())
            .serial_number(random_serial_number())
            .not_valid_before(datetime.datetime.utcnow())
            .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=1 * 365))
            # Sign our certificate with our private key
            .sign(key, get_default_hash()))
    # Record the issued certificate for audit/lookup.
    db.session.add(
        IssuedCertificate(
            common_name=(cert.subject.get_attributes_for_oid(
                OID_COMMON_NAME)[0].value),
            cert=cert.public_bytes(serialization.Encoding.PEM)))
    db.session.commit()
    return cert
def define_sign_request(csr_data: bytes):
    """Register a CSR and issue a proof-of-possession challenge.

    Validates the CSR's self-signature, stores a SignRequest row with a
    random 128-byte secret that expires in one hour, and returns
    ``(request_id, trust_address, secret encrypted to the CSR's public key)``.
    The requester must decrypt the secret to prove it holds the private key.

    Raises:
        AssertionError: if the CSR signature is invalid.
        ValueError: if the CSR has no common name.
    """
    csr = load_pem_x509_csr(csr_data)
    assert csr.is_signature_valid
    try:
        common_name = (csr.subject.get_attributes_for_oid(
            OID_COMMON_NAME)[0].value)
    except IndexError:
        raise ValueError("Common name is not available.")
    # session = scoped_session(Session)
    request = SignRequest(
        csr=csr.public_bytes(serialization.Encoding.PEM),
        secret_message=urandom(128),  # random challenge, never sent in clear
        expiration_time=datetime.datetime.utcnow() + datetime.timedelta(hours=1),
        trust_address=common_name)
    db.session.add(request)
    db.session.commit()
    return (
        request.id,
        request.trust_address,
        csr.public_key().encrypt(
            request.secret_message,
            get_default_encryption_padding()))
def verify_decrypted_message_and_issue(id: str, message: bytes) -> bytes:
    """Check a challenge answer and, if correct, issue the certificate.

    *message* must equal the stored secret of the unexpired SignRequest
    *id*; on success the stored CSR is signed and the PEM-encoded
    certificate bytes are returned.  NOTE(review): failures surface as
    AssertionError -- consider a dedicated exception for API handling.
    """
    request: SignRequest = db.session.query(SignRequest).get(id)
    assert request.expiration_time > datetime.datetime.utcnow()
    assert request.secret_message == message
    return issue_certificate(load_pem_x509_csr(request.csr)).public_bytes(serialization.Encoding.PEM)
| momvart/dns_project | certificate_authority/cert_issuer.py | cert_issuer.py | py | 2,592 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cryptography.hazmat.primitives.serialization.load_pem_private_key",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.serialization",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"li... |
26398398645 | # pylint: disable=missing-module-docstring
# pylint: disable=import-error
import json
from collections import namedtuple
from constants.isa_const import Opcode
class Term(namedtuple('Term', 'pos symbol')):
    """Description of an expression from the program's source text."""
    # implemented as a class (not a bare namedtuple) so it can carry a docstring
def write_code(filename, code):
    """Write the machine code to *filename* as indented JSON, then empty *code*."""
    serialized = json.dumps(code, indent=4)
    out = open(filename, "w", encoding="utf-8")
    try:
        out.write(serialized)
    finally:
        out.close()
    # The caller's list is cleared once it has been persisted.
    code.clear()
def read_code(filename):
    """Read machine code from a JSON file.

    Each instruction's 'opcode' field is passed through the Opcode enum
    (which validates it) and stored back as the enum's value.
    """
    with open(filename, encoding="utf-8") as file:
        code = json.loads(file.read())
    for instr in code:
        instr['opcode'] = Opcode(instr['opcode']).value
    return code
| buffer404/university | year3/Computer architecture/lab3/constants/isa.py | isa.py | py | 887 | python | ru | code | 1 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "constants.isa_const.Opcod... |
18456550160 | import argparse
import sys
def foltycalc(args):
    """Intentionally 'faulty' calculator.

    Applies the operation named by args.c ('add', 'sub', 'mul', 'div',
    'per') to args.a and args.b and prints the result -- except for
    certain magic operand values, where a joke answer is printed instead.
    Always returns None (the return value of print); the answer goes to
    stdout.

    NOTE(review): guards like `args.a==78 or args.a==23 and args.b==23 or
    args.b==78` parse as `a==78 or (a==23 and b==23) or b==78` because
    `and` binds tighter than `or`, so a single magic operand triggers the
    joke branch.  Presumably intentional for a "faulty" calculator --
    confirm before "fixing".
    """
    if args.c=="add":
        if args.a==78 or args.a==23 and args.b==23 or args.b==78:
            return print(f"{args.a}+{args.b} is :",666)
        else:
            return print(f"{args.a}+{args.b} is :",args.a+args.b)
    elif args.c=="sub":
        # note: the joke branch's message shows '+' even for subtraction
        if args.a==45 or args.a==12 and args.b==12 or args.b==45:
            return print(f"{args.a}+{args.b} is :",79)
        else:
            return print(f"{args.a}-{args.b} is :",args.a-args.b)
    elif args.c=="mul":
        if args.a==56 or args.a==3 and args.b==3 or args.b==56:
            return print(f"{args.a}+{args.b} is :",555)
        else:
            return print(f"{args.a}x{args.b} is :",args.a*args.b)
    elif args.c=="div":
        if args.a==45 or args.a==6 and args.b==6 or args.b==45:
            return print(f"{args.a}+{args.b} is :",80)
        else:
            return print(f"{args.a}/{args.b} is :",args.a/args.b)
    elif args.c=="per":
        if args.a==34 or args.a==4 and args.b==4 or args.b==34:
            return print(f"{args.a}%{args.b} is :",78)
        else:
            return print(f"{args.a}%{args.b} is :",args.a%args.b)
    else:
        return print("Somthig is wrong please take and reEnter")
if __name__=='__main__':
    # CLI: --a and --b are the operands, --c names the operation
    # (add / sub / mul / div / per).
    parser=argparse.ArgumentParser()
    parser.add_argument('--a',type=float,default=1.0,help="this is a utility for calculatin,please contract with kksingh.")
    parser.add_argument('--b',type=float,default=3.0,help="this is a utility for calculatin,please contract with kksingh.")
    parser.add_argument('--c',type=str,default="add",help="this is a utility for calculatin,please contract with kksingh.")
    args=parser.parse_args()
    # foltycalc prints the result itself and returns None, so this writes "None".
    sys.stdout.write(str(foltycalc(args)))
| kkstech1986/kksingh_soft | fultycalculater_CMD.py | fultycalculater_CMD.py | py | 1,886 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 41,
"usage_type": "attribute"
}
] |
9466511067 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Demonstration of class-imbalance resampling strategies (under/over
# sampling, SMOTE) on a feature dataset with binary target 'ET'.

# load dataset
df = pd.read_csv("./project/feature_data.csv")
df.head()
# drop the saved index column ("Unnamed: 0")
df.drop(["Unnamed: 0"], axis=1, inplace=True)
X = df.drop(["ET"], axis=1)  # feature matrix
y = df["ET"]                 # binary target
n_1 = np.sum(y==1)  # minority-class count
n_0 = np.sum(y==0)  # majority-class count
print(n_0, n_1)
r_state = 42  # fixed seed for reproducible resampling

# NOTE(review): the `ratio=` keyword and `fit_sample` method below come
# from an old imblearn release; newer versions renamed them to
# `sampling_strategy` and `fit_resample`.  Confirm the pinned version.

# Under sampling: random, 1:1 ratio
from imblearn.under_sampling import RandomUnderSampler
ratio = 'auto'
X_res, y_res = RandomUnderSampler(ratio=ratio, random_state=r_state).fit_sample(X,y)
y_res.shape
# Under sampling: random, 3:1 ratio (keep 3x the minority count of class 0)
ratio = {0: n_1*3, 1: n_1}
X_res, y_res = RandomUnderSampler(ratio=ratio, random_state=r_state).fit_sample(X,y)
y_res.shape
# Over sampling: random, 1:1 ratio
ratio = 'auto'
from imblearn.over_sampling import RandomOverSampler
X_res, y_res = RandomOverSampler(ratio=ratio, random_state=r_state).fit_sample(X,y)
y_res.shape
# Over sampling: random, grow the minority class to 3x its size
ratio = {0: n_0, 1: n_1*3}
from imblearn.over_sampling import RandomOverSampler
X_res, y_res = RandomOverSampler(ratio=ratio, random_state=r_state).fit_sample(X,y)
y_res.shape
# Over sampling: SMOTE (synthetic minority samples), 1:1 ratio
ratio = 'auto'
from imblearn.over_sampling import SMOTE
X_res, y_res = SMOTE(ratio=ratio, random_state=r_state).fit_sample(X,y)
y_res.shape
# Over sampling: SMOTE, grow the minority class to 3x its size
ratio = {0: n_0, 1: n_1*3}
from imblearn.over_sampling import SMOTE
X_res, y_res = SMOTE(ratio=ratio, random_state=r_state).fit_sample(X,y)
y_res.shape

# --- End-to-end example: SMOTE inside a pipeline with LinearSVC ---
from sklearn import datasets
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
from imblearn import over_sampling as os  # NB: alias shadows the stdlib 'os' name
from imblearn import pipeline as pl
from imblearn.metrics import classification_report_imbalanced
print(__doc__)
RANDOM_STATE = 42
# Generate a synthetic imbalanced dataset (10% / 90% class weights)
X, y = datasets.make_classification(n_classes=2, class_sep=2,
                                    weights=[0.1, 0.9], n_informative=10,
                                    n_redundant=1, flip_y=0, n_features=20,
                                    n_clusters_per_class=4, n_samples=5000,
                                    random_state=RANDOM_STATE)
# Putting SMOTE inside the pipeline means it resamples only training folds.
pipeline = pl.make_pipeline(os.SMOTE(random_state=RANDOM_STATE),
                            LinearSVC(random_state=RANDOM_STATE))
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    random_state=RANDOM_STATE)
# Train the classifier with balancing
pipeline.fit(X_train, y_train)
# Test the classifier and get the prediction
y_pred_bal = pipeline.predict(X_test)
# Show the classification report
print(classification_report_imbalanced(y_test, y_pred_bal))
| tyami/naver-competition-study | project/02_resampling.py | 02_resampling.py | py | 2,660 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "imblearn.under_sampling.RandomUnde... |
15238091678 | from django.urls import path
from . import views

# URL routes for the reviews app.
urlpatterns = [
    path('', views.review, name='review'),                                    # review landing page
    path('post_review/', views.post_review, name='post_review'),              # submit a new review
    path('review_list/', views.review_list, name='review_list'),              # list all reviews
    path('game_reviews/<game_id>', views.game_reviews, name='game_reviews'),  # reviews for one game
]
] | fergabi17/gemotion | reviews/urls.py | urls.py | py | 318 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
25369895893 | import os
import configparser
class ReadConfig(object):
    """Read fields from the 'config.ini' that sits next to this module.

    The config file is located and parsed lazily on the first call to
    get_project_info(); constructing the object does no I/O.
    """

    def __init__(self):
        self.project_path = None  # directory containing this module
        self.config_path = None   # full path to config.ini (set lazily)
        self.config = configparser.ConfigParser()

    def _set_project_path(self):
        # Resolve the project directory from this file's own location.
        try:
            self.project_path = os.path.split(os.path.abspath(__file__))[0]
        except Exception as e:
            raise(e)

    def _set_config_path(self):
        self._set_project_path()
        self.config_path = os.path.join(self.project_path, 'config.ini')

    def get_project_info(self, project_name, field_name):
        """Return the value of *field_name* in section *project_name*.

        Raises configparser.NoSectionError / NoOptionError if the section
        or field does not exist in config.ini.
        """
        # Bug fix: the original computed config_path but never read the
        # file, so every lookup failed with NoSectionError.  Load it once
        # before the first read.
        if self.config_path is None:
            self._set_config_path()
            self.config.read(self.config_path, encoding='utf-8')
        try:
            value = self.config.get(project_name, field_name)  # was: vaule (typo)
        except Exception as e:
            raise(e)
        return value
return vaule | weixingdemeng/weixingdemeng.github.io | autotest/read_config.py | read_config.py | py | 746 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",... |
18119046702 | #!/usr/bin/python3
import aiohttp_cors
import os
import aiohttp
import concurrent.futures
import asyncio
class EyesWebServer():
    """Serves the Eyes single-page app over aiohttp with permissive CORS."""

    def __init__(self):
        self.app_path = "/opt/eyes"                 # application root
        self.web_path = "/opt/eyes/web/eyes"        # static assets directory
        self.web_address = "home.dayton.tech"       # bind address
        self.web_port = 8080
        self.sse_enabled = False  # /sse route is registered only when True
        self.loop = asyncio.get_event_loop()

    def start(self, beta=False):
        """Build the aiohttp app, attach routes and CORS, and serve forever.

        *beta* is accepted for interface compatibility but currently unused.
        """
        try:
            self.web_server = aiohttp.web.Application()
            self.web_server.router.add_get('/', self.root_handler)
            self.web_server.router.add_static('/eyes', path=self.web_path, append_version=True)
            if self.sse_enabled:
                self.web_server.router.add_get('/sse', self.sse_handler)
            # Allow any origin/method/header so the SPA can be hosted elsewhere.
            self.cors = aiohttp_cors.setup(self.web_server, defaults={
                "*": aiohttp_cors.ResourceOptions(allow_credentials=True, expose_headers="*", allow_methods='*', allow_headers="*") })
            for route in self.web_server.router.routes():
                self.cors.add(route)
            self.loop.run_until_complete(self.start_web_server())
            self.loop.run_forever()
        except KeyboardInterrupt:  # pragma: no cover
            pass
        except Exception:
            # Bug fix: the original bare `except:` referenced an undefined
            # `logger`, raising a NameError that masked the real failure.
            import logging
            logging.getLogger(__name__).error('Loop terminated', exc_info=True)
        finally:
            # Bug fix: the original called self.server.shutdown(), but no
            # `server` attribute exists; clean up the AppRunner instead.
            runner = getattr(self, 'runner', None)
            if runner is not None:
                self.loop.run_until_complete(runner.cleanup())

    async def start_web_server(self):
        """Set up the AppRunner and start listening on the configured address."""
        self.runner = aiohttp.web.AppRunner(self.web_server)
        await self.runner.setup()
        self.ssl_context = None  # plain HTTP (no TLS)
        self.site = aiohttp.web.TCPSite(self.runner, self.web_address, self.web_port, ssl_context=self.ssl_context)
        await self.site.start()

    async def root_handler(self, request):
        # Serve the SPA entry point for '/'.
        return aiohttp.web.FileResponse('/opt/eyes/web/eyes/index.html')

    async def sse_handler(self, request):
        # Server-sent-events endpoint -- not implemented yet.
        pass
if __name__ == '__main__':
    # Stand-alone launch: build the server and block serving requests.
    eyes = EyesWebServer()
    eyes.start()
| tagdara/eyes | server/eyes.py | eyes.py | py | 1,998 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "asyncio.get_event_loop",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "aiohttp.web.Application",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "aiohttp.web",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "aioht... |
71176138595 | import asyncio
import time
from random import randint
from sys import argv
import web3 as w3
from web3.middleware import geth_poa_middleware
from solcx import compile_source
class Node:
    """Benchmark client for a local Ethereum proof-of-authority node.

    Connects over websocket, unlocks the given account, deploys a small
    Greeter contract compiled from inline Solidity, then repeatedly calls
    the read-only greet() function for ~30 seconds to measure throughput.
    """

    def __init__(self, addr, password, addresses, account_limit, to_send,
                 port):
        self.web3 = w3.Web3(
            w3.Web3.WebsocketProvider('ws://localhost:' + str(port)))
        self.my_addr = w3.Web3.toChecksumAddress(addr)
        self.web3.geth.personal.unlock_account(self.my_addr, password)
        self.index = addresses.index(self.my_addr)  # our position among peers
        # PoA chains need this middleware to handle their block headers.
        self.web3.middleware_onion.inject(geth_poa_middleware, layer=0)
        self.addresses = addresses
        self.account_limit = account_limit
        self.nonce = 0  # manually tracked transaction nonce
        self.to_send = to_send
        self.total = 0
        self.pending = []
        self.block_filter = self.web3.eth.filter('latest')
        # Compile the Greeter contract from inline Solidity source.
        compiled_sol = compile_source(
            '''
pragma solidity >0.5.0;
contract Greeter {
string public greeting;
constructor() public {
greeting = 'Hello';
}
function setGreeting(string memory _greeting) public {
greeting = _greeting;
}
function greet() view public returns (string memory) {
return greeting;
}
}
''',
            output_values=['abi', 'bin']
        )
        contract_id, contract_interface = compiled_sol.popitem()
        bytecode = contract_interface['bin']
        abi = contract_interface['abi']
        Greeter = self.web3.eth.contract(abi=abi, bytecode=bytecode)
        # Deploy the contract and wait for its receipt.
        tx_hash = Greeter.constructor().transact({
            'from': self.my_addr,
            'gas': 1000000,
            'nonce': self.nonce
        })
        self.nonce += 1
        tx_receipt = self.web3.eth.wait_for_transaction_receipt(tx_hash)
        self.greeter = self.web3.eth.contract(
            address=tx_receipt.contractAddress, abi=abi)

    def rand_addr(self):
        """Pick a random peer address other than our own."""
        r = randint(0, len(self.addresses) - 2)
        if r >= self.index:
            r += 1  # skip over our own index
        return self.addresses[r]

    def send(self):
        """Read-only greet() call (no transaction is broadcast)."""
        return self.greeter.functions.greet().call({'from': self.my_addr})

    async def run(self, poll_interval):
        """Call greet() every *poll_interval* seconds for ~30 seconds."""
        t = time.time()
        while True:
            if time.time() - t > 30:
                return
            _ = self.send()
            # NOTE(review): bumping the nonce for a read-only call looks
            # unnecessary -- confirm against the transaction-sending variant.
            self.nonce += 1
            await asyncio.sleep(poll_interval)

    def start(self):
        """Run the benchmark loop on a fresh event loop; return elapsed seconds."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        t1 = time.time()
        try:
            loop.run_until_complete(self.run(1))
        finally:
            loop.close()
        return time.time() - t1
addresses = [
w3.Web3.toChecksumAddress('0xe2ddab5e77df6d62f8661650e46d695be1963bf7'),
w3.Web3.toChecksumAddress('0xd18aefd325d127fe3e1d6272180a8629413ddc6b'),
w3.Web3.toChecksumAddress('0xcf7d7b22af30aadce47930cd234ed34c4488da5e'),
w3.Web3.toChecksumAddress('0x82aa48615b89237a0195441da44a63dcbf199f21'),
w3.Web3.toChecksumAddress('0x12c825237c38cfe2f879fcd475cb438ed0778d8e'),
w3.Web3.toChecksumAddress('0xdee5bc6e1c404c693c0fcf145dcfcb64330eb8bd'),
w3.Web3.toChecksumAddress('0xec317a80394abb23c8940b2b7f2d66e0e3c97677'),
w3.Web3.toChecksumAddress('0xb48bd20a8c8e687511e36df039c17b8704c2c115'),
]
account_limit = int(argv[3])
to_send = int(argv[4])
node = Node(argv[1], argv[2], addresses, account_limit, to_send, argv[5])
node.start()
| coltonfike/e2c | testnet/contract.py | contract.py | py | 3,596 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "web3.Web3",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "web3.Web3.WebsocketProvider",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "web3.Web3",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "web3.Web3.toChec... |
32738212165 | import argparse
from colorama import init
from .common import Messages, Path, check_if_exists
from .. import Havok
def parse_args():
    """Define and parse CLI arguments for the packfile-to-JSON converter."""
    parser = argparse.ArgumentParser(description="Convert Havok packfile to JSON")
    parser.add_argument("hkFile", type=Path, help="Path to a Havok packfile")
    parser.add_argument(
        "outFile", type=Path, help="Path to destination JSON file", nargs="?"
    )
    parser.add_argument(
        "-p", "--pretty-print", help="Pretty-print the JSON file", action="store_true"
    )
    return parser.parse_args()
def hk_to_json(hkFile: Path, outFile: Path, pretty_print: bool):
    """Convert *hkFile* to JSON at *outFile*.

    If *outFile* is falsy it defaults to *hkFile* with a '.json' suffix.
    check_if_exists guards against silently overwriting an existing file.
    """
    if not outFile:
        outFile = hkFile.with_suffix(".json")
    check_if_exists(outFile)
    Messages.loading(hkFile)
    hk = Havok.from_file(hkFile)
    Messages.deserializing(hkFile)
    hk.deserialize()
    Messages.writing(outFile)
    hk.to_json(outFile, pretty_print=pretty_print)
def main():
    """CLI entry point: initialise colorama, parse args, run the conversion."""
    init(autoreset=True)  # colorama: reset terminal styles after each print
    args = parse_args()
    hk_to_json(args.hkFile, args.outFile, args.pretty_print)
    Messages.done()


if __name__ == "__main__":
    main()
| krenyy/botw_havok | botw_havok/cli/hk_to_json.py | hk_to_json.py | py | 1,112 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "common.Path",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "common.Path",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "common.Path",
"li... |
22083627803 | import pika
import sys
import time

# Connection settings for the remote RabbitMQ service.
username = 'chris'  # username/password of the remote RabbitMQ account
pwd = '123456'
ip_addr = '172.18.99.177'
# A pika.exceptions.IncompatibleProtocolError / StreamLostError
# ('Transport indicated EOF') here was caused by mistakenly using port
# 15672 (the HTTP management-UI port).
# RabbitMQ's AMQP listener is on port 5672, not 15672; after changing
# the port (and the forwarding) everything works.
port_num = 5672
# Connect to the broker and declare the queue.
credentials = pika.PlainCredentials(username, pwd)
connection = pika.BlockingConnection(pika.ConnectionParameters(ip_addr, port_num, '/', credentials))
channel = connection.channel()
# Declare the queue with durable=True so it survives broker restarts
# (persistence step 1).  (The original comment said 'balance'; the queue
# is actually named 'work_queue'.)
channel.queue_declare(queue='work_queue', durable=True)
message_list = ['中山眼科','市一','省医']
for item in message_list:
    # In RabbitMQ a message can never be sent directly to the queue, it always needs to go through an exchange.
    channel.basic_publish(
        exchange='',
        routing_key='work_queue',  # route the message to the 'work_queue' queue
        body=item,  # the message payload
        properties=pika.BasicProperties(delivery_mode=2, )  # delivery_mode=2 marks the message persistent (persistence step 2)
    )  # publish one message
    print(" [%s] Sent %s" % (time.strftime('%H:%M:%S'), item))
    # time.sleep(0.2)
connection.close()  # close the connection to the broker
connection.close() # 关闭消息队列服务的连接 | chriskowk/PycharmProjects | PyQtTest/sendmq.py | sendmq.py | py | 1,549 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pika.PlainCredentials",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pika.BlockingConnection",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pika.ConnectionParameters",
"line_number": 15,
"usage_type": "call"
},
{
"api_name"... |
8529218144 | import os
import numpy as np
from netCDF4 import Dataset
from lisfloodutilities.readers import PCRasterMap
from lisfloodutilities.pcr2nc import convert
from . import TestWithCleaner
class TestPcr2nc(TestWithCleaner):
    """Round-trip test for pcr2nc: convert four PCRaster timestep maps to
    one NetCDF file and verify times, coordinates and per-step data."""

    def test_convert(self):
        dataset = 'tests/data/folder_d'    # folder of PCRaster maps (.001-.004)
        out = 'tests/data/pcr2nc_test.nc'  # NetCDF output path
        # register the produced file for removal after the test
        self.cleanups.append((os.unlink, (out,)))
        # metadata driving the conversion (variable, CRS, time axis, ...)
        metadata = {
            'format': 'NETCDF4',
            'variable': {
                'shortname': 'map',
                'units': 'm',
                'least_significant_digit': 2,
                'compression': 9,
            },
            'geographical': {
                'datum': 'WGS84'
            },
            'source': 'JRC E1',
            'reference': 'JRC E1',
            'time': {
                'calendar': 'proleptic_gregorian',
                'units': 'days since 1999-01-01'
            }
        }
        # reference data: the original PCRaster maps, one per timestep
        map_0 = PCRasterMap('tests/data/folder_d/map.001')
        map_1 = PCRasterMap('tests/data/folder_d/map.002')
        map_2 = PCRasterMap('tests/data/folder_d/map.003')
        map_3 = PCRasterMap('tests/data/folder_d/map.004')
        convert(dataset, out, metadata)
        with Dataset(out) as nc:
            time_arr = nc.variables['time'][:]
            lat_arr = nc.variables['y'][:]
            lon_arr = nc.variables['x'][:]
            # four timesteps on a 35x35 grid
            assert time_arr.size == 4
            assert (lat_arr.size, lon_arr.size) == (35, 35)
            var_0 = nc.variables['map'][0, :, :]
            var_1 = nc.variables['map'][1, :, :]
            var_2 = nc.variables['map'][2, :, :]
            var_3 = nc.variables['map'][3, :, :]
            # each NetCDF time slice must match its source map
            assert np.allclose(map_0.data, var_0)
            assert np.allclose(map_1.data, var_1)
            assert np.allclose(map_2.data, var_2)
            assert np.allclose(map_3.data, var_3)
        map_0.close()
        map_1.close()
        map_2.close()
        map_3.close()
| ec-jrc/lisflood-utilities | tests/test_pcr2nc.py | test_pcr2nc.py | py | 1,941 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "os.unlink",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "lisfloodutilities.readers.PCRasterMap",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "lisfloodutilities.readers.PCRasterMap",
"line_number": 39,
"usage_type": "call"
},... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.