seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42432458928 | from django.shortcuts import get_object_or_404
from django.contrib.auth import authenticate
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from django.db.models import Q
from uuid import uuid4
from rest_framework import serializers, exceptions, validators
from rest_framework.authtoken.models import Token
from .models import CustomUser, UserLanguage, Experience
from .validators import validate_phone_and_email
class RegisterSerializer(serializers.Serializer):
    """Register a new user identified by a phone number or e-mail address.

    The ``username`` field carries the raw identifier; ``validate`` routes it
    into ``email``/``phone`` and replaces ``username`` with a random suffix of
    a UUID, so the stored username is always unique and opaque.
    """
    username = serializers.CharField(validators=[validate_phone_and_email])
    password = serializers.CharField(write_only=True)
    is_staff = serializers.BooleanField(default=False)

    def validate(self, attrs):
        """Authorize staff creation, then split the identifier by type."""
        if attrs['is_staff']:
            # Only an authenticated staff member may create staff accounts.
            requester = self.context['request'].user
            if not requester.is_authenticated:
                raise exceptions.NotAuthenticated()
            if not requester.is_staff:
                raise exceptions.PermissionDenied()
        identifier = attrs['username']
        target_key = 'email' if '@' in identifier else 'phone'
        attrs[target_key] = identifier
        # Mint a 12-character random username from the tail of a UUID.
        attrs['username'] = str(uuid4())[-12:]
        return attrs

    def create(self, validated_data):
        """Delegate creation (incl. password hashing) to the custom manager."""
        return CustomUser.objects.create_user(**validated_data)
class UserLanguageSerializer(serializers.ModelSerializer):
    """Serializer for a user's language entries (language + proficiency level)."""
    class Meta:
        model = UserLanguage
        fields = ['language', 'level']
class UserExperienceSerializer(serializers.ModelSerializer):
    """Serializer for a user's work-experience entries."""
    class Meta:
        model = Experience
        exclude = ['user', 'date_created']

    def validate(self, attrs):
        """Reject entries claiming both an end date and 'currently working'.

        Bug fix: the original *returned* the ValidationError instead of
        *raising* it, so the exclusivity check never actually failed
        validation — the exception object was silently returned as the
        validated data.
        """
        # .get() keeps the check safe when either field is omitted from input.
        if attrs.get('work_end_date') and attrs.get('work_now'):
            raise exceptions.ValidationError(
                "You can't enter both of 'work end time' and 'work now' fields")
        return attrs
class UserSerializer(serializers.ModelSerializer):
    """Full profile serializer for CustomUser with nested languages/experiences."""
    # Nested collections; also declared read-only in Meta below.
    languages = UserLanguageSerializer(many=True, required=False)
    experiences = UserExperienceSerializer(many=True, required=False)
    date_joined = serializers.DateTimeField(format='%Y-%m-%d %H:%M', read_only=True)
    class Meta:
        model = CustomUser
        fields = (
            'username', 'first_name', 'last_name', 'email', 'phone', 'balance',
            'date_joined', 'about', 'birth_date', 'avatar', 'other_skills', 'hobby',
            'resume', 'edu1_name', 'edu1_direction', 'edu1_start_date', 'edu1_end_date',
            'edu1_now', 'edu2_name', 'edu2_direction', 'edu2_start_date', 'edu2_end_date',
            'edu2_now', 'licence_category', 'languages', 'experiences'
        )
        read_only_fields = ('languages', 'experiences')
class AuthorSerializer(serializers.ModelSerializer):
    """Compact public representation of a user (e.g. as a listing's author)."""
    class Meta:
        model = CustomUser
        fields = ('username', 'first_name', 'email', 'phone', 'avatar')
# only for admin serializers
class UserAdminListSerializer(serializers.ModelSerializer):
    """Admin-only list serializer with a hyperlink to the user detail view."""
    date_joined = serializers.DateTimeField(format='%Y-%m-%d %H:%M', read_only=True)
    # Links each row to 'adminuser-detail', resolved by username (not pk).
    url = serializers.HyperlinkedIdentityField(
        lookup_field='username',
        view_name='adminuser-detail',
        read_only=True,
    )
    class Meta:
        model = CustomUser
        fields = (
            'username', 'email', 'phone', 'date_joined', 'url'
        )
class UserAdminUpdateASerializer(serializers.ModelSerializer):
    """Admin-only serializer for toggling a user's active/deleted flags."""
    class Meta:
        model = CustomUser
        fields = ('is_active', 'is_deleted')
| IslombekOrifov/olx | src/api/v1/accounts/serializers.py | serializers.py | py | 3,579 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.Serializer",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.CharField",
"line_number": 16,
"usage_ty... |
6319273494 | import theano
import theano.tensor as T
from .utils import floatX
from .layers import l2norm
# ------------------------
# Regularization
# ------------------------
def clip_norm(grad, clip, norm):
    """Rescale *grad* by clip/norm when the global *norm* exceeds *clip*.

    A non-positive *clip* disables clipping and returns the gradient as-is.
    """
    if clip <= 0:
        return grad
    # Symbolic branch: scale down only when the norm reaches the threshold.
    return T.switch(T.ge(norm, clip), grad * clip / norm, grad)
def clip_norms(grads, clip):
    """Clip a list of gradients jointly by their combined L2 norm."""
    total_norm = T.sqrt(sum(T.sum(g ** 2) for g in grads))
    return [clip_norm(g, clip, total_norm) for g in grads]
# Base regularizer
class Regularizer(object):
    """L1/L2 gradient penalties plus hard weight-norm constraints.

    NOTE: ``self.__dict__.update(locals())`` is this file's convention for
    saving constructor arguments; it also stores ``self`` in the instance
    dict, which is harmless here.
    """
    def __init__(self, l1=0., l2=0., maxnorm=0., l2norm=False, frobnorm=False):
        self.__dict__.update(locals())
    def max_norm(self, param, maxnorm):
        # Rescale columns whose L2 norm exceeds ``maxnorm`` (no-op when <= 0).
        if maxnorm > 0:
            norms = T.sqrt(T.sum(T.sqr(param), axis=0))
            desired = T.clip(norms, 0, maxnorm)
            param = param * (desired / (1e-7 + norms))
        return param
    def l2_norm(self, param):
        # Column-wise unit normalisation. Calls the *imported* ``l2norm``
        # helper from .layers, not the boolean ``self.l2norm`` flag that
        # shares its name.
        return param / l2norm(param, axis=0)
    def frob_norm(self, param, nrows):
        # Scale the whole matrix so its Frobenius norm equals sqrt(nrows).
        return (param / T.sqrt(T.sum(T.sqr(param)))) * T.sqrt(nrows)
    def gradient_regularize(self, param, grad):
        """Add L2 (weight decay) and L1 (sign) penalty terms to the gradient."""
        grad += param * self.l2
        grad += T.sgn(param) * self.l1
        return grad
    def weight_regularize(self, param):
        """Apply hard norm constraints to the weights after a gradient step."""
        param = self.max_norm(param, self.maxnorm)
        if self.l2norm:
            param = self.l2_norm(param)
        if self.frobnorm > 0:
            param = self.frob_norm(param, self.frobnorm)
        return param
# ------------------------
# Updates
# ------------------------
class Update(object):
    """Abstract base for update rules; holds the regularizer and clip norm.

    NOTE(review): the default ``Regularizer()`` is built once at class
    definition time and shared by every instance that relies on the default.
    Harmless as long as Regularizer stays immutable after construction.
    """
    def __init__(self, regularizer=Regularizer(), clipnorm=0.):
        self.__dict__.update(locals())
    def __call__(self, params, cost):
        # Subclasses return a list of (shared_variable, new_value) pairs.
        raise NotImplementedError
# Stochastic Gradient Descent
class SGD(Update):
    """Vanilla stochastic gradient descent."""
    def __init__(self, lr=0.01, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())
    def __call__(self, params, cost):
        """Build the (param, param - lr*grad) update list for one step."""
        updates = []
        grads = T.grad(cost, params)
        grads = clip_norms(grads, self.clipnorm)
        for param, grad in zip(params, grads):
            grad = self.regularizer.gradient_regularize(param, grad)
            updated_param = param - self.lr * grad
            updated_param = self.regularizer.weight_regularize(updated_param)
            updates.append((param, updated_param))
        return updates
# SGD with momentum
class Momentum(Update):
    """SGD with classical (heavy-ball) momentum."""
    def __init__(self, lr=0.01, momentum=0.9, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())
    def __call__(self, params, cost):
        updates = []
        grads = T.grad(cost, params)
        grads = clip_norms(grads, self.clipnorm)
        for param, grad in zip(params, grads):
            grad = self.regularizer.gradient_regularize(param, grad)
            # Per-parameter velocity accumulator, initialised to zeros.
            m = theano.shared(param.get_value() * 0.)
            v = (self.momentum * m) - (self.lr * grad)
            updates.append((m, v))
            updated_param = param + v
            updated_param = self.regularizer.weight_regularize(updated_param)
            updates.append((param, updated_param))
        return updates
# SGD with Nesterov Accelerated Gradient
class Nesterov(Update):
    """SGD with Nesterov accelerated gradient (look-ahead momentum)."""
    def __init__(self, lr=0.01, momentum=0.9, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())
    def __call__(self, params, cost):
        updates = []
        grads = T.grad(cost, params)
        grads = clip_norms(grads, self.clipnorm)
        for param, grad in zip(params, grads):
            grad = self.regularizer.gradient_regularize(param, grad)
            m = theano.shared(param.get_value() * 0.)
            v = (self.momentum * m) - (self.lr * grad)
            # Nesterov step: apply momentum-on-velocity plus the raw gradient.
            updated_param = param + self.momentum * v - self.lr * grad
            updated_param = self.regularizer.weight_regularize(updated_param)
            updates.append((m, v))
            updates.append((param, updated_param))
        return updates
# RMS Prop
class RMSprop(Update):
    """RMSprop: scale the step by a running RMS of recent gradients."""
    def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())
    def __call__(self, params, cost):
        updates = []
        grads = T.grad(cost, params)
        grads = clip_norms(grads, self.clipnorm)
        for param, grad in zip(params, grads):
            grad = self.regularizer.gradient_regularize(param, grad)
            # Exponential moving average of squared gradients.
            acc = theano.shared(param.get_value() * 0.)
            acc_new = self.rho * acc + (1 - self.rho) * grad ** 2
            updates.append((acc, acc_new))
            updated_param = param - self.lr * (grad / T.sqrt(acc_new + self.epsilon))
            updated_param = self.regularizer.weight_regularize(updated_param)
            updates.append((param, updated_param))
        return updates
# Adam
class Adam(Update):
    """ADAM optimiser (Kingma & Ba), including the paper's beta1 decay ``l``."""
    def __init__(self, lr=0.001, b1=0.9, b2=0.999, e=1e-8, l=1 - 1e-8, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())
    def __call__(self, params, cost):
        updates = []
        grads = T.grad(cost, params)
        grads = clip_norms(grads, self.clipnorm)
        # Shared time-step counter; b1 is decayed towards 0 as t grows.
        t = theano.shared(floatX(1.))
        b1_t = self.b1 * self.l ** (t - 1)
        for param, grad in zip(params, grads):
            grad = self.regularizer.gradient_regularize(param, grad)
            m = theano.shared(param.get_value() * 0.)  # first moment
            v = theano.shared(param.get_value() * 0.)  # second moment
            m_t = b1_t * m + (1 - b1_t) * grad
            v_t = self.b2 * v + (1 - self.b2) * grad ** 2
            # Bias-corrected moment estimates.
            m_c = m_t / (1 - self.b1 ** t)
            v_c = v_t / (1 - self.b2 ** t)
            p_t = param - (self.lr * m_c) / (T.sqrt(v_c) + self.e)
            p_t = self.regularizer.weight_regularize(p_t)
            updates.append((m, m_t))
            updates.append((v, v_t))
            updates.append((param, p_t))
        # Advance the shared step counter once per call (outside the loop).
        updates.append((t, t + 1.))
        return updates
# AdaGrad
class Adagrad(Update):
    """AdaGrad: per-parameter step size shrinks with accumulated squared grads."""
    def __init__(self, lr=0.01, epsilon=1e-6, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())
    def __call__(self, params, cost):
        updates = []
        grads = T.grad(cost, params)
        grads = clip_norms(grads, self.clipnorm)
        for param, grad in zip(params, grads):
            grad = self.regularizer.gradient_regularize(param, grad)
            # Monotonically growing sum of squared gradients.
            acc = theano.shared(param.get_value() * 0.)
            acc_t = acc + grad ** 2
            updates.append((acc, acc_t))
            p_t = param - (self.lr / T.sqrt(acc_t + self.epsilon)) * grad
            p_t = self.regularizer.weight_regularize(p_t)
            updates.append((param, p_t))
        return updates
# AdeDelta
class Adadelta(Update):
    """AdaDelta: RMS-of-updates over RMS-of-gradients step sizing."""
    def __init__(self, lr=0.5, rho=0.95, epsilon=1e-6, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())
    def __call__(self, params, cost):
        updates = []
        grads = T.grad(cost, params)
        grads = clip_norms(grads, self.clipnorm)
        for param, grad in zip(params, grads):
            grad = self.regularizer.gradient_regularize(param, grad)
            acc = theano.shared(param.get_value() * 0.)        # EMA of grad^2
            acc_delta = theano.shared(param.get_value() * 0.)  # EMA of update^2
            acc_new = self.rho * acc + (1 - self.rho) * grad ** 2
            updates.append((acc, acc_new))
            # Ratio of RMS(update) to RMS(grad) gives the adaptive step.
            update = grad * T.sqrt(acc_delta + self.epsilon) / T.sqrt(acc_new + self.epsilon)
            updated_param = param - self.lr * update
            updated_param = self.regularizer.weight_regularize(updated_param)
            updates.append((param, updated_param))
            acc_delta_new = self.rho * acc_delta + (1 - self.rho) * update ** 2
            updates.append((acc_delta, acc_delta_new))
        return updates
# No updates
class NoUpdate(Update):
    """Identity update rule: every parameter maps onto itself (no learning)."""
    def __init__(self, lr=0.01, momentum=0.9, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())

    def __call__(self, params, cost):
        """Return a no-op update pair for each parameter."""
        return [(param, param) for param in params]
| maym2104/ift6266-h17-project | lib/updates.py | updates.py | py | 8,249 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "theano.tensor.switch",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "theano.tensor",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "theano.tensor.ge",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "theano.tensor.sqr... |
72718749224 | import numpy as np
#from pyGITR.math_helper import *
from typing import Callable
import matplotlib.pyplot as plt
import pydoc
import netCDF4
import os
class Sputtering_and_reflection():
    """Empirical (Eckstein-style) physical sputtering yields and particle
    reflection coefficients for a small set of projectile/target pairs.

    Fit parameters are hard-coded per pair and stored on the *class* by the
    ``Set_*`` classmethods, so interleaved use with different pairs from
    multiple call sites would clobber each other's state.
    """
    def ShowAvailableProjectiles(self):
        # Projectile species for which fit data is tabulated below.
        for D in ['H', 'D', 'T', 'He4', 'Si', 'C', 'W']:
            print(D)
    def ShowAvailableTargets(self):
        # Target materials for which fit data is tabulated below.
        for D in ['C', 'Si', 'W', 'SiC']:
            print(D)
    # these parameter values are hard-coded. Want it in a text file separately
    @classmethod
    def Set_PhysicalSputteringParameters(cls, Projectile, Target):
        """Load the Eckstein fit parameters (lambda, q, mu, Eth) for the pair.

        Unknown pairs only print a message; the previously set class
        attributes (if any) remain in effect.
        """
        if Projectile == 'H' and Target =='C':
            cls.lambda_parameter = 1.3533
            cls.q_parameter = 0.0241
            cls.mu_parameter = 1.4103
            cls.Eth_parameter = 38.630 # in eV
        elif Projectile == 'D' and Target =='C':
            cls.lambda_parameter = 1.2848
            cls.q_parameter = 0.0539
            cls.mu_parameter = 1.1977
            cls.Eth_parameter = 27.770 # in eV
        elif Projectile == 'T' and Target =='C':
            cls.lambda_parameter = 1.9050
            cls.q_parameter = 0.0718
            cls.mu_parameter = 1.1512
            cls.Eth_parameter = 23.617 # in eV
        elif Projectile == 'He4' and Target =='C':
            cls.lambda_parameter = 4.5910
            cls.q_parameter = 0.1951
            cls.mu_parameter = 1.7852
            cls.Eth_parameter = 19.124 # in eV
        elif Projectile == 'C' and Target =='C':
            cls.lambda_parameter = 13.9666
            cls.q_parameter = 0.7015
            cls.mu_parameter = 2.0947
            cls.Eth_parameter = 21.4457 # in eV
        elif Projectile == 'W' and Target =='C': # Carolina Bjorkas
            cls.lambda_parameter = 1.2625
            cls.q_parameter = 1.3902
            cls.mu_parameter = 3.5395
            cls.Eth_parameter = 114.9398 # in eV
        elif Projectile == 'Si' and Target =='C': # Fit with RUSTBCA simulations
            cls.lambda_parameter = 1.0
            cls.q_parameter = 1.8
            cls.mu_parameter = 0.3
            cls.Eth_parameter = 10.457 # in eV
        elif Projectile == 'H' and Target =='Si':
            cls.lambda_parameter = 0.4819
            cls.q_parameter = 0.0276
            cls.mu_parameter = 0.9951
            cls.Eth_parameter = 49.792 # in eV
        elif Projectile == 'D' and Target =='Si':
            cls.lambda_parameter = 0.5326
            cls.q_parameter = 0.0569
            cls.mu_parameter = 1.6537
            cls.Eth_parameter = 24.543 # in eV
        elif Projectile == 'T' and Target =='Si':
            cls.lambda_parameter = 0.4112
            cls.q_parameter = 0.0816
            cls.mu_parameter = 0.9325
            cls.Eth_parameter = 21.298 # in eV
        elif Projectile == 'He4' and Target =='Si':
            cls.lambda_parameter = 0.2524
            cls.q_parameter = 0.2319
            cls.mu_parameter = 1.4732
            cls.Eth_parameter = 18.899 # in eV
        elif Projectile == 'Si' and Target =='Si':
            cls.lambda_parameter = 0.6726
            cls.q_parameter = 2.6951
            cls.mu_parameter = 1.7584
            cls.Eth_parameter = 20.035 # in eV
        elif Projectile == 'C' and Target =='Si': # Fit with RUSTBCA simulations
            cls.lambda_parameter = 1.0
            cls.q_parameter = 1.4
            cls.mu_parameter = 12.0
            cls.Eth_parameter = 14 # in eV
        elif Projectile == 'W' and Target =='Si':
            print('Choose a different set of projectile and target, data not available for W->Si')
        elif Projectile == 'H' and Target =='W':
            cls.lambda_parameter = 1.0087
            cls.q_parameter = 0.0075
            cls.mu_parameter = 1.2046
            cls.Eth_parameter = 457.42 # in eV
        elif Projectile == 'D' and Target =='W':
            cls.lambda_parameter = 0.3583
            cls.q_parameter = 0.0183
            cls.mu_parameter = 1.4410
            cls.Eth_parameter = 228.84 # in eV
        elif Projectile == 'T' and Target =='W':
            cls.lambda_parameter = 0.2870
            cls.q_parameter = 0.0419
            cls.mu_parameter = 1.5802
            cls.Eth_parameter = 153.8842 # in eV
        elif Projectile == 'He4' and Target =='W':
            cls.lambda_parameter = 0.1692
            cls.q_parameter = 0.1151
            cls.mu_parameter = 1.7121
            cls.Eth_parameter = 120.56 # in eV
        elif Projectile == 'C' and Target =='W': # Carolina Bjorkas
            cls.lambda_parameter = 0.0447
            cls.q_parameter = 1.5622
            cls.mu_parameter = 1.0200
            cls.Eth_parameter = 59.1980 # in eV
        elif Projectile == 'W' and Target =='W':
            cls.lambda_parameter = 2.2697
            cls.q_parameter = 18.6006
            cls.mu_parameter = 3.1273
            cls.Eth_parameter = 24.9885 # in eV
        elif Projectile == 'C' and Target =='SiC': # from RustBCA
            cls.lambda_parameter = 1.0
            cls.q_parameter = 2.0
            cls.mu_parameter = 1.3
            cls.Eth_parameter = 25 # in eV
        elif Projectile == 'Si' and Target =='SiC': # from RustBCA
            cls.lambda_parameter = 2.0
            cls.q_parameter = 4.0
            cls.mu_parameter = 1.3
            cls.Eth_parameter = 70 # in eV
        elif Projectile == 'H' and Target =='SiC': # from RustBCA
            cls.lambda_parameter = 2.2
            cls.q_parameter = 0.031
            cls.mu_parameter = 1.5
            cls.Eth_parameter = 40 # in eV
        elif Projectile == 'Si' and Target =='W':
            print('Choose a different set of projectile and target, data not available for Si-> W')
        else:
            print('Choose a different set of projectile and target')
    @classmethod
    def Set_Mass_AtomicN(cls, Projectile, Target):
        """Set projectile/target masses (amu) and atomic numbers on the class."""
        if Projectile == 'H':
            cls.Projectile_Mass = 1
            cls.Projectile_AtomicN = 1
        elif Projectile == 'D':
            cls.Projectile_Mass = 2.014
            cls.Projectile_AtomicN = 1
        elif Projectile == 'T':
            cls.Projectile_Mass = 3.016
            cls.Projectile_AtomicN= 1
        elif Projectile == 'He4':
            cls.Projectile_Mass = 4
            cls.Projectile_AtomicN = 2
        elif Projectile == 'Si':
            cls.Projectile_Mass = 28.0855
            cls.Projectile_AtomicN = 14
        elif Projectile == 'C':
            cls.Projectile_Mass = 12.011
            cls.Projectile_AtomicN = 6
        elif Projectile == 'W':
            cls.Projectile_Mass = 183.84
            cls.Projectile_AtomicN = 74
        if Target == 'C':
            cls.Target_Mass = 12.011
            cls.Target_AtomicN = 6
        elif Target == 'Si':
            cls.Target_Mass = 28.0855
            cls.Target_AtomicN = 14
        elif Target == 'W':
            cls.Target_Mass = 183.84
            cls.Target_AtomicN = 74
        elif Target == 'SiC':
            # NOTE(review): effective values for the compound -- confirm source.
            cls.Target_Mass = 40
            cls.Target_AtomicN = 20
    @classmethod
    def Set_ReflectionParameters(cls, Projectile, Target):
        """Load the a_1..a_4 reflection-coefficient fit parameters for the pair."""
        if Projectile == 'C' and Target =='C':
            cls.a_1 = -0.03022
            cls.a_2 = -1.107
            cls.a_3 = 6.544
            cls.a_4 = 0.1256
        elif Projectile == 'C' and Target =='W':
            cls.a_1 = 1.96
            cls.a_2 = 0.1
            cls.a_3 = 2.2
            cls.a_4 = 0.18
        elif Projectile == 'W' and Target =='W':
            cls.a_1 = -3.685
            cls.a_2 = 0.02781
            cls.a_3 = 0.7825e-4
            cls.a_4 = -1.109
        elif Projectile == 'Si' and Target =='Si':
            cls.a_1 = -0.8631e1
            cls.a_2 = 0.6888e-1
            cls.a_3 = 0.1808e-1
            cls.a_4 = -0.8577
        else:
            print('Choose a different set of projectile and target')
    @classmethod
    def Calculate_PhysicalSputtering_RotationFactor(cls, Projectile, Target, Incident_Energy, Incident_Theta):
        """Angular enhancement factor for the sputtering yield.

        Interpolates the tabulated Yamamura/Eckstein angular-fit parameters
        (f, b, c, Theta0*) in energy and evaluates the angular factor.
        Returns 1 (no correction) for pairs without a table or for
        pathological/NaN factors.

        NOTE(review): the ``Sputtering_Rotation_*`` tables are not defined in
        this module as shown -- confirm they are provided elsewhere, otherwise
        the known-pair branches raise NameError.
        """
        flag = 0 # change it to zero if not found from the list
        from scipy.interpolate import interp1d
        if Projectile == 'H' and Target =='C':
            Sputtering_Dictionary = Sputtering_Rotation_H_C
        elif Projectile == 'D' and Target =='C':
            Sputtering_Dictionary = Sputtering_Rotation_D_C
        elif Projectile == 'T' and Target =='C':
            Sputtering_Dictionary = Sputtering_Rotation_T_C
        elif Projectile == 'C' and Target =='C':
            Sputtering_Dictionary = Sputtering_Rotation_C_C
        elif Projectile == 'D' and Target =='Si':
            Sputtering_Dictionary = Sputtering_Rotation_D_Si
        elif Projectile == 'Si' and Target =='Si':
            Sputtering_Dictionary = Sputtering_Rotation_Si_Si
        elif Projectile == 'H' and Target =='W':
            Sputtering_Dictionary = Sputtering_Rotation_H_W
        elif Projectile == 'D' and Target =='W':
            Sputtering_Dictionary = Sputtering_Rotation_D_W
        elif Projectile == 'T' and Target =='W':
            Sputtering_Dictionary = Sputtering_Rotation_T_W
        elif Projectile == 'W' and Target =='W':
            Sputtering_Dictionary = Sputtering_Rotation_W_W
        elif Projectile == 'W' and Target =='C':
            Sputtering_Dictionary = Sputtering_Rotation_W_C
        elif Projectile == 'C' and Target =='W':
            Sputtering_Dictionary = Sputtering_Rotation_C_W
        else:
            flag = 1
        if flag == 0:
            E0_array = np.array(Sputtering_Dictionary['E0'])
            f_array = np.array(Sputtering_Dictionary['f'])
            b_array = np.array(Sputtering_Dictionary['b'])
            c_array = np.array(Sputtering_Dictionary['c'])
            Theta0_star_array = np.array(Sputtering_Dictionary['Theta0_star'])
            # Interpolate each fit parameter over incident energy.
            f_interpolate = interp1d(E0_array,f_array,bounds_error=False,fill_value="extrapolate")
            b_interpolate = interp1d(E0_array, b_array,bounds_error=False,fill_value="extrapolate")
            c_interpolate = interp1d(E0_array, c_array,bounds_error=False,fill_value="extrapolate")
            Theta0star_interpolate = interp1d(E0_array, Theta0_star_array,bounds_error=False,fill_value="extrapolate")
            cosine_factor = np.cos((np.pi*0.5*Incident_Theta/Theta0star_interpolate(Incident_Energy))**c_interpolate(Incident_Energy))
            factor = (cosine_factor)**(-f_interpolate(Incident_Energy)) * np.exp(b_interpolate(Incident_Energy)*(1-(1/cosine_factor) ))
            # Guard against blow-ups near grazing incidence / extrapolation.
            if factor>1000000:
                factor = 1
            elif np.isnan(factor):
                factor = 1
            #print("factor is ",factor)
            return factor
        else:
            return 1
    def Calculate_PhysicalSputteringParameters(self, Projectile, Target, Incident_Energy, Incident_Theta:float=0.0):
        """Eckstein-formula sputtering yield for the pair at the given energy
        (eV) and incidence angle; yield is 0 below the threshold Eth.
        """
        Sputtering_and_reflection.Set_PhysicalSputteringParameters(Projectile,Target)
        Sputtering_and_reflection.Set_Mass_AtomicN(Projectile,Target)
        electric_charge_square = 1.4399 # eV nm
        Bohr_radius = 0.0529177 # nm
        Lindhard_length = (9*np.pi**2/128)**(1/3) * Bohr_radius * (self.Target_AtomicN**(2/3)+ self.Projectile_AtomicN**(2/3))**(-0.5) # nm
        # NOTE(review): the standard Lindhard reduced energy divides by
        # Z1*Z2*e^2; here e^2 ends up multiplied -- verify against the
        # original fit's definition before changing.
        epsilon = Incident_Energy*(self.Target_Mass/(self.Projectile_Mass + self.Target_Mass))* (Lindhard_length/(self.Projectile_AtomicN*self.Target_AtomicN)*electric_charge_square)
        Nuclear_stopping = (0.5*np.log(1+1.2288*epsilon))/(epsilon + 0.1728*np.sqrt(epsilon) + 0.008*epsilon**0.1504)
        #Y = self.q_parameter*Nuclear_stopping*((Incident_Energy/self.Eth_parameter)-1)**self.mu_parameter/(self.lambda_parameter + ((Incident_Energy/self.Eth_parameter)-1)**self.mu_parameter)
        Numerator = ((Incident_Energy/self.Eth_parameter)-1)
        if Numerator < 0:
            # Below threshold: clamp to zero yield.
            Numerator = 0.0
        Y = self.q_parameter*Nuclear_stopping*Numerator**self.mu_parameter/(self.lambda_parameter + Numerator**self.mu_parameter)
        if (Incident_Theta>0.0):
            # Apply the angular enhancement factor for non-normal incidence.
            Y = Sputtering_and_reflection.Calculate_PhysicalSputtering_RotationFactor(Projectile, Target, Incident_Energy, Incident_Theta)*Y
        return Y
    def Calculate_ReflectionCoefficients(self, Projectile, Target, Incident_Energy):
        """Particle reflection coefficient R_N from the a_1..a_4 fit."""
        Sputtering_and_reflection.Set_ReflectionParameters(Projectile,Target)
        Sputtering_and_reflection.Set_Mass_AtomicN(Projectile,Target)
        electric_charge_square = 1.4399 # eV nm
        Bohr_radius = 0.0529177 # nm
        Lindhard_length = (9*np.pi**2/128)**(1/3) * Bohr_radius * (self.Target_AtomicN**(2/3)+ self.Projectile_AtomicN**(2/3))**(-0.5) # nm
        epsilon_L = ((self.Target_Mass + self.Projectile_Mass)/self.Target_Mass)*(self.Target_AtomicN*self.Projectile_AtomicN*electric_charge_square/Lindhard_length)
        epsilon = Incident_Energy/epsilon_L
        R_N = np.exp(self.a_1*epsilon**self.a_2)/(1+np.exp(self.a_3*epsilon**self.a_4))
        return R_N.real
class Physical_Sputtering_Reflection_Plots():
    """Vectorised helpers: evaluate yields/coefficients over energy arrays."""
    @classmethod
    def Sputtering_yields(cls, Projectile, Target,Energies):
        """Physical sputtering yield at each energy in *Energies* (normal incidence)."""
        calculator = Sputtering_and_reflection()
        yields = np.zeros(Energies.size)
        for index, energy in enumerate(Energies):
            yields[index] = calculator.Calculate_PhysicalSputteringParameters(
                Projectile, Target, energy).real
        return yields
    @classmethod
    def Reflection_yields(cls, Projectile, Target, Energies):
        """Particle reflection coefficient at each energy in *Energies*."""
        calculator = Sputtering_and_reflection()
        coefficients = np.zeros(Energies.size)
        for index, energy in enumerate(Energies):
            coefficients[index] = calculator.Calculate_ReflectionCoefficients(
                Projectile, Target, energy).real
        return coefficients
#plt.plot(Energies,Sputtering)
#s = Sputtering_and_reflection()
#s.ShowAvailableTargets()
| audide12/DIIIDsurface_pyGITR | pyGITR/Physical_Sputtering.py | Physical_Sputtering.py | py | 15,097 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number... |
3207056630 | import datetime as dt
import platform
from pathlib import Path
from unittest import mock
import pdf2gtfs.user_input.cli as cli
from pdf2gtfs.config import Config
from pdf2gtfs.datastructures.gtfs_output.agency import (
GTFSAgency, GTFSAgencyEntry)
from test import P2GTestCase
def get_path_with_insufficient_permissions() -> str:
    """ Returns a platform-dependent path, where the user (hopefully) has
    not enough permissions for.
    """
    on_windows = platform.system().lower() == "windows"
    return "C:/Windows/pdf2gtfs_test/" if on_windows else "/pdf2gtfs_test"
def create_agency(path: Path, num: int, url: str = None, tz: str = None
                  ) -> GTFSAgency:
    """Write an agency.txt with *num* entries into *path* and load it.

    Defaults to the pdf2gtfs URL and the Europe/Berlin timezone. As a side
    effect, points ``Config.input_files`` at the written file so GTFSAgency
    picks it up.
    """
    if not url:
        url = "https://www.pdf2gtfs.com"
    if not tz:
        tz = "Europe/Berlin"
    agencies = []
    for i in range(num):
        agency_id = f"agency_{i}"
        agency_entry = GTFSAgencyEntry(agency_id, url, tz, agency_id)
        agencies.append(agency_entry.to_output())
    input_file = path.joinpath("agency.txt")
    with open(input_file, "w", encoding="utf-8") as file:
        file.write("agency_id,agency_name,agency_url,agency_timezone")
        file.write("\n" + "\n".join(agencies) + "\n")
    Config.input_files = [input_file]
    return GTFSAgency(path)
class TestCLI(P2GTestCase):
@classmethod
def setUpClass(cls: P2GTestCase, **kwargs) -> None:
kwargs = {"create_temp_dir": True, "disable_logging": True}
super().setUpClass(**kwargs)
@mock.patch("pdf2gtfs.user_input.cli.input", create=True)
def test__get_input(self, mock_input: mock.Mock) -> None:
def check(answer: str) -> bool:
return answer == "valid" or answer == ""
mock_input.side_effect = ["invalid", "invalid", "valid", "invalid"]
self.assertEqual("invalid", cli._get_input("test", ["invalid"], ""))
self.assertEqual(1, mock_input.call_count)
mock_input.reset_mock()
mock_input.side_effect = ["invalid", "invalid", "valid", "invalid"]
self.assertEqual("valid", cli._get_input("test", check, ""))
self.assertEqual(3, mock_input.call_count)
@mock.patch("pdf2gtfs.user_input.cli.input", create=True)
def test__get_inputs(self, mock_input: mock.Mock) -> None:
def check(answer: str) -> bool:
return answer == "valid" or answer == ""
mock_input.side_effect = ["valid", "valid", "invalid", "test", ""]
results = cli._get_inputs("test", check, "")
self.assertEqual(["valid", "valid"], results)
self.assertEqual(5, mock_input.call_count)
mock_input.reset_mock()
mock_input.side_effect = ["valid", "valid", "invalid", "test", ""]
results = cli._get_inputs("", ["invalid", "test", ""], "")
self.assertEqual(["invalid", "test"], results)
self.assertEqual(5, mock_input.call_count)
mock_input.reset_mock()
def test__to_date(self) -> None:
dates = ["20221004", "20221231", "20220229", "20240229",
"no date", ""]
results = [dt.datetime(2022, 10, 4), dt.datetime(2022, 12, 31),
None, dt.datetime(2024, 2, 29), None, None]
for i in range(len(dates)):
with self.subTest(i=i):
self.assertEqual(results[i], cli._to_date(dates[i]))
@mock.patch("pdf2gtfs.user_input.cli.input", create=True)
def test__get_annotation_exceptions(self, mock_input: mock.Mock) -> None:
# Test valid dates.
dates = [dt.datetime(2022, 10, 4),
dt.datetime(2022, 12, 31),
dt.datetime(2024, 2, 29)]
mock_input.side_effect = (
[date.strftime("%Y%m%d") for date in dates] + [""])
self.assertEqual(dates, cli._get_annotation_exceptions())
self.assertEqual(4, mock_input.call_count)
mock_input.reset_mock()
# Test invalid dates.
dates = [dt.datetime(2022, 10, 4),
dt.datetime(2022, 12, 31),
dt.datetime(2024, 2, 29)]
mock_input.side_effect = (
["20220229", "test"]
+ [date.strftime("%Y%m%d") for date in dates]
+ [""])
self.assertEqual(dates, cli._get_annotation_exceptions())
self.assertEqual(6, mock_input.call_count)
@mock.patch("pdf2gtfs.user_input.cli.input", create=True)
def test__get_annotation_default(self, mock_input: mock.Mock) -> None:
mock_input.side_effect = ["y", "n", "y", "n"]
self.assertEqual(True, cli._get_annotation_default())
self.assertEqual(False, cli._get_annotation_default())
self.assertEqual(True, cli._get_annotation_default())
self.assertEqual(False, cli._get_annotation_default())
self.assertEqual(4, mock_input.call_count)
@mock.patch("pdf2gtfs.user_input.cli.input", create=True)
def test__handle_annotation(self, mock_input: mock.Mock) -> None:
mock_input.side_effect = ["s", "e", "a"]
self.assertEqual((True, False), cli._handle_annotation("annot"))
self.assertEqual((False, False), cli._handle_annotation("annot"))
self.assertEqual((False, True), cli._handle_annotation("annot"))
self.assertEqual(3, mock_input.call_count)
@mock.patch("pdf2gtfs.user_input.cli.input", create=True)
def test_handle_annotations(self, mock_input: mock.Mock) -> None:
annots = ["a", "*"]
# Skip all.
mock_input.side_effect = ["a", "s"]
self.assertEqual({}, cli.handle_annotations(annots))
self.assertEqual(1, mock_input.call_count)
mock_input.reset_mock()
# Skip all single.
mock_input.side_effect = ["s", "s"]
self.assertEqual({}, cli.handle_annotations(annots))
self.assertEqual(2, mock_input.call_count)
mock_input.reset_mock()
fmt = "%Y%m%d"
dates = [dt.datetime(2022, 3, 22),
dt.datetime(2022, 10, 4)]
# Single annotation.
annots = ["*"]
mock_input.side_effect = ["e", "y", dates[0].strftime(fmt),
dates[1].strftime(fmt), ""]
result = {"*": (True, dates)}
self.assertEqual(result, cli.handle_annotations(annots))
self.assertEqual(5, mock_input.call_count)
mock_input.reset_mock()
# Multiple annotations.
annots = ["*", "a"]
mock_input.side_effect = ["e", "y", dates[0].strftime(fmt), "",
"e", "n", dates[1].strftime(fmt), ""]
result = {"*": (True, [dates[0]]), "a": (False, [dates[1]])}
self.assertEqual(result, cli.handle_annotations(annots))
self.assertEqual(8, mock_input.call_count)
@mock.patch("pdf2gtfs.user_input.cli.input", create=True)
def test_ask_overwrite_existing_file(self, mock_input: mock.Mock) -> None:
filename = Path(self.temp_dir.name).joinpath("test.zip")
with open(filename, "w", encoding="utf-8") as fil:
fil.write("test_ask_overwrite")
mock_input.side_effect = ["n", "y", "n"]
self.assertFalse(cli.ask_overwrite_existing_file(filename))
self.assertTrue(cli.ask_overwrite_existing_file(filename))
self.assertFalse(cli.ask_overwrite_existing_file(filename))
self.assertEqual(3, mock_input.call_count)
def test__get_agency_string(self) -> None:
widths = [5, 9, 11, 9, 8]
agency = ["agency_id", "agency_name", "url", "tz"]
result = " 0 | agency_id | agency_name | url | tz"
self.assertEqual(result, cli._get_agency_string("0", agency, widths))
result = " 12 | agency_id | agency_name | url | tz"
self.assertEqual(result, cli._get_agency_string("12", agency, widths))
agency = ["agency_id", "agency_name", "url", "timezone"]
result = " 12 | agency_id | agency_name | url | timezone"
self.assertEqual(result, cli._get_agency_string("12", agency, widths))
def test__get_agency_header(self) -> None:
agency = create_agency(Path(self.temp_dir.name), 0)
result = ["agency_id", "agency_name", "agency_url", "agency_timezone"]
self.assertEqual(result, cli._get_agency_header(agency))
def test__get_agency_column_width(self) -> None:
path = Path(self.temp_dir.name)
agency = create_agency(path, 1)
result = [5, 9, 11, 24, 15]
self.assertEqual(result, cli._get_agency_column_widths(agency))
agency = create_agency(path, 1, "a", "agency_test_timezone")
result = [5, 9, 11, 10, 20]
self.assertEqual(result, cli._get_agency_column_widths(agency))
def test__get_agency_prompt(self) -> None:
result = ("Multiple agencies found:\n\t"
"index | agency_id | agency_name "
"| agency_url | agency_timezone\n\t"
" 0 | agency_0 | agency_0 "
"| www.example.com | Europe/Berlin\n\t"
" 1 | agency_1 | agency_1 "
"| www.example.com | Europe/Berlin\n\t"
" 2 | agency_2 | agency_2 "
"| www.example.com | Europe/Berlin\n\n"
"Please provide the index of the agency you want to use.")
agency = create_agency(Path(self.temp_dir.name), 3, "www.example.com")
self.assertEqual(result, cli._get_agency_prompt(agency))
@mock.patch("pdf2gtfs.user_input.cli.input", create=True)
def test__select_agency(self, mock_input: mock.Mock) -> None:
agency = create_agency(Path(self.temp_dir.name), 3)
mock_input.side_effect = ["1"]
self.assertEqual(agency.entries[1], cli.select_agency(agency))
self.assertEqual(1, mock_input.call_count)
mock_input.reset_mock()
# IDs need to be between 1 and number of agencies.
mock_input.side_effect = ["3", "1"]
self.assertEqual(agency.entries[1], cli.select_agency(agency))
self.assertEqual(2, mock_input.call_count)
@mock.patch("pdf2gtfs.user_input.cli.input", create=True)
def test_create_output_directory(self, mock_input: mock.Mock) -> None:
# Enable interactive mode.
Config.non_interactive = False
# Test failing creation. (Permission error)
mock_input.side_effect = ["", "", "", "", "q"]
path = get_path_with_insufficient_permissions()
Config.output_path = path
self.assertFalse(Path(path).exists())
result = cli.create_output_directory()
self.assertFalse(result)
self.assertEqual(5, mock_input.call_count)
self.assertFalse(Path(path).exists())
mock_input.reset_mock()
# Test valid creation.
path = Path(self.temp_dir.name).joinpath("output_dir_test")
Config.output_path = path
self.assertFalse(Path(path).exists())
result = cli.create_output_directory()
self.assertTrue(result)
self.assertTrue(Path(path).exists())
mock_input.reset_mock()
def test_create_output_directory__non_interactive(self) -> None:
    """Non-interactive mode: creation fails or succeeds without prompting."""
    # Disable interactive mode.
    Config.non_interactive = True
    # Test failing creation. (Permission error)
    path = get_path_with_insufficient_permissions()
    Config.output_path = path
    self.assertFalse(Path(path).exists())
    result = cli.create_output_directory()
    self.assertFalse(result)
    self.assertFalse(Path(path).exists())
    # Test valid creation.
    path = Path(self.temp_dir.name).joinpath(
        "output_dir_test__non_interactive")
    Config.output_path = path
    self.assertFalse(Path(path).exists())
    result = cli.create_output_directory()
    self.assertTrue(result)
    self.assertTrue(Path(path).exists())
| heijul/pdf2gtfs | test/test_user_input/test_cli.py | test_cli.py | py | 11,810 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "platform.system",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pdf2gtfs.datastructures.gtfs_output.agency.GTFSAgencyEntry",
"line_number": 32,
"usage_type": "call"
},
... |
30977077952 | import numpy as np
from scipy.interpolate import lagrange
from numpy.polynomial.polynomial import Polynomial
import matplotlib as plt
size = int(input())
x = np.random.uniform(0, size, size)
y = np.random.uniform(0, size, size)
z = np.random.uniform(0, size, size - 1)
print("x:", x, "y:", y, "z:", z)
#x = np.array([0, 1, 2, 3])
#y = np.array([-2, -5, 0, -4])
#z = np.array([4])
n = len(x)
poly = lagrange(x, y)
dif.append(poly(z[i]))
def phi(i, z):
p = 1
for j in range(0, n):
if i != j:
p *= (z[0] - x[j]) / (x[i] - x[j])
return p
def P(z):
s = 0
for i in range(n):
s += y[i] * phi(i, z)
return s
print(P(z))
plt.show() | Mcken09/Numerical-methods1 | interpol_lagr.py | interpol_lagr.py | py | 646 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.uniform",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random... |
22534157712 | import cv2
import argparse
import numpy as np
from cam import *

# Hand-picked calibration point pairs per camera: *_src are the pixel
# corners of a reference quadrilateral in the raw camera image, *_dst the
# matching corners in the (3x larger) bird's-eye target plane.
camback_src = np.float32([[328, 363], [433, 359], [447, 482], [314, 488]])
camback_dst = np.float32([[814, 1359], [947, 1359], [947, 1488], [814, 1488]])
camleft_src = np.float32([[340, 426], [446, 424], [470, 554], [333, 554]])
camleft_dst = np.float32([[833, 1426], [970, 1426], [970, 1554], [833, 1554]])
camright_src = np.float32([[226, 428], [338, 430], [332, 572], [193, 568]])
camright_dst = np.float32([[693, 1430], [838, 1430], [838, 1572], [693, 1572]])
def warpImage(image, src, dst):
    """Perspective-warp `image` so that the `src` quad maps onto `dst`.

    The output canvas is three times the input size in each dimension.
    Returns (warped_image, forward_matrix, inverse_matrix).
    """
    height, width = image.shape[0], image.shape[1]
    canvas_size = (int(width * 3), int(height * 3))
    forward = cv2.getPerspectiveTransform(src, dst)
    inverse = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(image, forward, canvas_size, flags=cv2.INTER_LINEAR)
    return warped, forward, inverse
def mouse(event, x, y, flags, param):
    """OpenCV mouse callback: on left click, draw the clicked point and its
    pixel coordinates onto the module-level `img` and redisplay it."""
    # NOTE(review): `frame0` is never defined or used anywhere — leftover
    # global declaration; confirm it can be removed.
    global frame0
    if event == cv2.EVENT_LBUTTONDOWN:
        xy = "%d,%d" % (x, y)
        cv2.circle(img, (x, y), 1, (0, 0, 255), thickness = -1)
        cv2.putText(img, xy, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 255), thickness = 1)
        cv2.imshow("img", img)
        # Blocks inside the callback until any key is pressed.
        key = cv2.waitKey(0)
# Load the right-camera calibration image and attach the click-to-inspect
# callback so calibration corner coordinates can be read off interactively.
img = cv2.imread('./capture/right.jpg')
cv2.namedWindow("img",2)
cv2.namedWindow("pe_img",2)
cv2.setMouseCallback("img", mouse)
cv2.imshow("img", img)
# First loop: inspect pixel coordinates until 'q' is pressed.
while (True):
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
# Warp with the right-camera calibration points and show the result.
pe_frame,_,_ = warpImage(img,camright_src,camright_dst)
cv2.imshow('pe_img', pe_frame)
cv2.imshow('img', img)
key = cv2.waitKey(1)
# Second loop: keep windows open until 'q', then tear them down.
while (True):
    key = cv2.waitKey(1)
    if key == ord('q'):
        cv2.destroyAllWindows()
        break
| Hyper-Bullet/littleAnt | perspect_stiching/perspective.py | perspective.py | py | 1,660 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.float32",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_numb... |
23788787319 | from django.test import TestCase
from django.contrib.auth.models import User
from .models import Message, Thread
class ThreadTestCase(TestCase):
    """Tests for the Thread/Message models and the custom Thread manager."""

    # Prepare the test environment.
    def setUp(self):
        self.user1 = User.objects.create_user('user1', None, 'test1234')
        self.user2 = User.objects.create_user('user2', None, 'test1234')
        self.user3 = User.objects.create_user('user3', None, 'test1234')
        self.thread = Thread.objects.create()

    # tests
    def test_add_users_to_thread(self):
        # Add two users to the many-to-many relation.
        self.thread.users.add(self.user1, self.user2)
        # Check that they were added correctly.
        self.assertEqual(len(self.thread.users.all()), 2)

    # Retrieve an existing thread from its users.
    def test_filter_thread_by_users(self):
        self.thread.users.add(self.user1, self.user2)
        threads = Thread.objects.filter(users=self.user1).filter(users=self.user2)
        self.assertEqual(self.thread, threads[0])

    # Check that no thread exists when the users are not part of one.
    def test_filter_non_existent_thread(self):
        threads = Thread.objects.filter(users=self.user1).filter(users=self.user2)
        self.assertEqual(len(threads), 0)

    # Check that messages are correctly added to a thread.
    def test_add_messages_to_thread(self):
        self.thread.users.add(self.user1, self.user2)
        message1 = Message.objects.create(user=self.user1, content="Muy buenas")
        message2 = Message.objects.create(user=self.user2, content="Hola")
        self.thread.messages.add(message1, message2)
        self.assertEqual(len(self.thread.messages.all()), 2)
        for message in self.thread.messages.all():
            print("({}): {}".format(message.user, message.content))

    # Check whether a user outside the thread can add messages.
    def test_add_message_from_user_not_in_thread(self):
        self.thread.users.add(self.user1, self.user2)
        message1 = Message.objects.create(user=self.user1, content="Muy buenas")
        message2 = Message.objects.create(user=self.user2, content="Hola")
        message3 = Message.objects.create(user=self.user3, content="Soy un espía")
        self.thread.messages.add(message1, message2, message3)
        # NOTE(review): expects 2 even though 3 messages were added —
        # presumably an m2m_changed signal filters out non-members; confirm.
        self.assertEqual(len(self.thread.messages.all()), 2)

    def test_find_thread_with_custom_manager(self):
        self.thread.users.add(self.user1, self.user2)
        thread = Thread.objects.find(self.user1, self.user2)
        self.assertEqual(self.thread, thread)

    def test_find_or_create_thread_with_custom_manager(self):
        self.thread.users.add(self.user1, self.user2)
        # Existing thread is found...
        thread = Thread.objects.find_or_create(self.user1, self.user2)
        self.assertEqual(self.thread, thread)
        # ...and a missing one is created on demand.
        thread = Thread.objects.find_or_create(self.user1, self.user3)
        self.assertIsNotNone(thread)
| mjmed/Django-Web-Playground | messenger/tests.py | tests.py | py | 3,080 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.test.TestCase",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User.objects.create_user",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 10,
"usa... |
14303021398 | import cv2
import argparse

# CLI: path to the Haar cascade XML used for face detection.
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='data/haarcascade_frontalface_default.xml')
def main(args):
    """Read webcam frames in a loop, draw a rectangle around each detected
    face, and display the result until the Esc key is pressed.

    Args:
        args: parsed CLI namespace; args.model is the cascade XML path.
    """
    # Load the cascade
    face_cascade = cv2.CascadeClassifier(args.model)
    # To capture video from webcam.
    cap = cv2.VideoCapture(0)
    # To use a video file as input
    # cap = cv2.VideoCapture('filename.mp4')
    while True:
        # Read the frame
        _, img = cap.read()
        # Convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Detect the faces (scale factor 1.1, at least 4 neighbours).
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)
        # Draw the rectangle around each face
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # Display
        cv2.imshow('img', img)
        # Stop if escape key is pressed
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
    # Release the VideoCapture object
    cap.release()
if __name__ == "__main__":
args = parser.parse_args()
main(args) | sinantie/zumo-face-follower-rpi | test/test_detect_face_video.py | test_detect_face_video.py | py | 1,130 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.cvt... |
298698477 | # Write a web scraper that fetches the information from the Wikipedia page
# on Web scraping. Extract all the links on the page and filter them so the
# navigation links are excluded.
# Programmatically follow one of the links that lead to another Wikipedia article,
# extract the text content from that article, and save it to a local text file.
# BONUS TASK: Use RegExp to find all numbers in the text.
URL = "https://en.wikipedia.org/wiki/Web_scraping"
import requests
from bs4 import BeautifulSoup
from pprint import pprint
import json
def extract_links(URL):
    """Scrape internal wiki links from the article's paragraphs and save
    them as {link text: absolute URL} to wiki_web_scrap.json.

    Only anchors inside <p> tags are considered, which excludes the page's
    navigation links.
    """
    # create a bs object
    page = requests.get(URL)
    soup = BeautifulSoup(page.text,"lxml")
    data = {}
    for para in soup.find_all("p"):
        try:
            links = para.find_all("a")
            for link in links:
                text = link.text
                page_link = link["href"]
                # Hrefs starting with '/' are relative wiki paths; turn
                # them into absolute URLs.
                if page_link[0] == "/":
                    full_link = f"https://en.wikipedia.org{page_link}"
                    data[text] = full_link
        # NOTE(review): presumably guards against anchors without a usable
        # href — confirm TypeError is actually the exception raised here.
        except TypeError as e:
            link = None
    with open("wiki_web_scrap.json", "w") as wws:
        json.dump(data,wws)
def extract_texts():
    """Follow each link saved by extract_links() and dump the article body
    text to resources/04_web-scraping/wiki/<link text>.txt."""
    with open("wiki_web_scrap.json","r") as r:
        data = json.load(r)
    for key,link in data.items():
        page = requests.get(link)
        soup = BeautifulSoup(page.text, "lxml")
        main_content = soup.find("main")
        # Wikipedia article bodies live in div.mw-parser-output.
        divs = main_content.find_all("div", class_="mw-parser-output")
        for div in divs:
            # NOTE(review): append mode means re-running duplicates the
            # text in the output files — confirm this is intended.
            with open(f"resources/04_web-scraping/wiki/{key}.txt","a") as f:
                f.write(div.text)
            print("file created successfully \n")
if __name__ == "__main__":
    # Runs only the extraction step; extract_links(URL) must have been run
    # before to produce wiki_web_scrap.json.
    extract_texts()
| symonkipkemei/cnd-labs | python-301/resources/04_web-scraping/04_05_wiki_scrape.py | 04_05_wiki_scrape.py | py | 1,751 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number... |
1914845096 | from joblib import parallel_backend
from tslearn.metrics import cdist_dtw, dtw
import pandas as pd
import numpy as np
def get_DTW_distance_matrix_w_mask(matrix1, matrix2, mask, window = 4, n_jobs = 1):
    """Compute DTW distances only for the (row1, row2) pairs whose mask
    entry equals 0; every other pair is set to np.inf in the result.

    Args:
        matrix1, matrix2: DataFrames with one time series per row.
        mask: DataFrame over (matrix1.index x matrix2.index); 0 marks the
            pairs that must be computed.
        window: Sakoe-Chiba band radius passed to dtw().
        n_jobs: accepted for API symmetry but unused here — TODO confirm.

    Returns:
        DataFrame of DTW distances with np.inf for masked-out pairs.
    """
    def calculate_distance(profiles):
        # `profiles` is a (meterID1, meterID2) index tuple from the mask.
        profile1, profile2 = profiles
        return dtw(matrix1.loc[profile1], matrix2.loc[profile2], sakoe_chiba_radius=window)
    # figure out entries that need to be calculated
    entries_to_calculate = (
        mask.stack().pipe(lambda x: x[x==0]).index
    )
    distances = entries_to_calculate.map(calculate_distance)
    cost_matrix = (
        pd.Series(distances, index = entries_to_calculate)
        .to_frame('distance')
        .rename_axis(['meterID1', 'meterID2'], axis = 0)
        .reset_index()
        .pivot_table(index = 'meterID1', columns = 'meterID2', values = 'distance')
        # Pairs that were not computed (mask != 0) get infinite distance.
        .fillna(np.inf)
    )
    return cost_matrix
def get_DTW_distance_matrix(matrix1, matrix2 = None, window = 4, n_jobs = 4):
"""
Calculate similarity between all rows of matrix 1 with all rows of matrix 2 (if matrix2 is non calculate self similarity of matrix1)
"""
m1 = matrix1.to_numpy()
m2 = None if matrix2 is None else matrix2.to_numpy()
if n_jobs > 1:
with parallel_backend('loky', n_jobs=n_jobs):
distance_matrix = cdist_dtw(m1, m2, sakoe_chiba_radius = window, n_jobs = n_jobs)
else:
distance_matrix = cdist_dtw(m1, m2, sakoe_chiba_radius=window, n_jobs = n_jobs)
return pd.DataFrame(distance_matrix, index = matrix1.index, columns = matrix2.index) | jankrans/Conditional-Generative-Neural-Networks | repositories/profile-clustering/energyclustering/clustering/DTW.py | DTW.py | py | 1,570 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tslearn.metrics.dtw",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "joblib.parallel_backe... |
26987702949 | import pytest
from privatechats.models import PrivateChatRoom, PrivateChatRoomMessage
@pytest.mark.django_db
def test_private_chat_room_str_method():
    """PrivateChatRoom.__str__ should simply return the room name."""
    room = PrivateChatRoom.objects.create(name='user1user2')
    assert str(room) == 'user1user2'
# BUG FIX: this test creates database rows but was missing the django_db
# mark that its sibling test has; without it pytest-django denies DB access.
@pytest.mark.django_db
def test_group_chat_room_message_str_method(django_user_model):
    """PrivateChatRoomMessage.__str__ combines room, user, timestamp, content."""
    user = django_user_model.objects.create(username='user1')
    room_obj = PrivateChatRoom.objects.create(name='user1user2')
    room_msg_obj = PrivateChatRoomMessage.objects.create(
        user=user,
        room=room_obj,
        content='Hello World!'
    )
    assert str(room_msg_obj) == f"user1user2:user1 | {room_msg_obj.timestamp} => Hello World!"
| mf210/LetsChat | tests/privatechats/privatechats_model_tests.py | privatechats_model_tests.py | py | 808 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "privatechats.models.PrivateChatRoom.objects.create",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "privatechats.models.PrivateChatRoom.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "privatechats.models.PrivateChatRoom",
"li... |
25287870424 | import threading
import json
import os
from astar_model import AStarModel
from tkinter.ttk import Progressbar
from tkinter import (Tk, Frame, Button, Label, Entry, Checkbutton, Scale, Canvas, messagebox, filedialog,
StringVar, IntVar,
DISABLED, NORMAL,
W, S, NW, EW, NSEW,
BOTTOM, TOP, LEFT,
X, Y,
HORIZONTAL,
YES)
class AStarView(Tk):
def __init__(self, width=15):
    """Build the main window: geometry, event bindings, colour tables and
    dialog texts, then create the backing model and the widget tree.

    Args:
        width: initial number of rows (= columns) of the square grid.
    """
    Tk.__init__(self)
    self.title('A* Search Visualizer')
    # Application version
    self.__VERSION = '1.0.0'
    # Grid width slider range
    self.__MAX_GRID_WIDTH = 100
    self.__MIN_GRID_WIDTH = 2
    # Width of the control frame in pixels (you can modify this)
    self.__CONTROL_DIM_WIDTH = 250
    # Width of the grid in pixels (you can modify this)
    self.__GRID_DIM_WIDTH = 800
    # Calculate the correct dimensions for the root frame (8px offsets prevent grid clipping from grid border thickness)
    self.geometry('{}x{}'.format(
        self.__CONTROL_DIM_WIDTH + self.__GRID_DIM_WIDTH + 8, self.__GRID_DIM_WIDTH + 8))
    # Disable application resizing
    self.resizable(0, 0)
    # Event bindings (mouse drag paints/erases, keys toggle edit modes)
    self.bind('<ButtonPress-1>', self.__on_m1_down)
    self.bind('<B1-Motion>', self.__on_m1_down)
    self.bind('<ButtonPress-3>', self.__on_m3_down)
    self.bind('<B3-Motion>', self.__on_m3_down)
    self.bind('<KeyPress>', self.__on_key_press)
    self.bind('<KeyRelease>', self.__on_key_release)
    # Toggles when certain keys are held down
    self.__EDIT_MODE = {
        'setStart': False,
        'setEnd': False,
    }
    # Button colours (you can change these)
    self.__COLOUR_RECONFIGURE_BUTTON = 'coral'
    self.__COLOUR_HOW_TO_USE_BUTTON = 'SlateGray2'
    self.__COLOUR_ABOUT_BUTTON = 'SlateGray1'
    self.__COLOUR_IMPORT_MAZE_BUTTON = 'sandy brown'
    self.__COLOUR_EXPORT_MAZE_BUTTON = 'dark salmon'
    # Maze colours (you can change these)
    self.__COLOUR_EMPTY = 'white'
    self.__COLOUR_WALL = 'gray25'
    self.__COLOUR_START = 'pale green'
    self.__COLOUR_END = 'salmon'
    self.__COLOUR_UNSOLVED = 'thistle'
    self.__COLOUR_SOLVED = 'lemon chiffon'
    self.__COLOUR_PATH = 'light slate blue'
    # Maps model maze symbols to colours for update_gui()
    self.__SYMBOL_TO_COLOUR = {
        ' ': self.__COLOUR_EMPTY,
        'W': self.__COLOUR_WALL,
        'S': self.__COLOUR_START,
        'E': self.__COLOUR_END,
        '?': self.__COLOUR_UNSOLVED,
        'X': self.__COLOUR_SOLVED,
        'P': self.__COLOUR_PATH
    }
    # Dialog messages
    self.__DIALOG_MESSAGES = {
        'help': ('Holding left-click places walls\n'
                 'Holding right-click removes walls\n\n'
                 'Holding [S] while pressing left-click sets the start point\n'
                 'Holding [E] while pressing left-click sets the end point\n\n'
                 'Press [space] to start / stop the solver\n'
                 'Press [Esc] to close the application'),
        'about': ('This application visualizes the A* search algorithm.\n\n'
                  'Made by Jonathan Mack\n'
                  'v{}'.format(self.__VERSION)),
    }
    # Contains the GUI representation of model.settings
    self.cb_values = {}
    # Disables mouse and keyboard events while True
    self.__is_reconfiguring = False
    # Initialize the backing model
    self.__initialize_model(width, width)
    # Initialize the GUI
    self.__initialize_gui()
def __initialize_model(self, nRow, nCol):
    ''' Initializes the backing model for the view.

    Args:
        nRow::[int]
            The number of rows in the model
        nCol::[int]
            The number of columns in the model

    Returns:
        None
    '''
    self.model = AStarModel(view=self, nRow=nRow, nCol=nCol)
    # Set the model settings to the GUI settings during reconfiguration
    self.__handle_cb()
    # Disable print to console
    self.model.set_setting('enablePrintToConsole', False)
def __initialize_gui(self):
    ''' Initializes the GUI.

    Control frame is built first so it packs to the left of the grid.

    Args:
        None

    Returns:
        None
    '''
    self.__initialize_control_frame()
    self.__initialize_grid_frame()
def __initialize_control_frame(self):
    ''' Initializes the control frame which contains application options.

    Args:
        None

    Returns:
        None
    '''
    # The control frame itself (fixed width, full height, left side)
    control_frame = Frame(self, width=self.__CONTROL_DIM_WIDTH)
    control_frame.pack_propagate(0)
    control_frame.pack(side=LEFT, fill=Y)
    # Initialize children frames
    self.__initialize_configuration_frame(control_frame)
    self.__initialize_options_frame(control_frame)
    self.__initialize_import_export_frame(control_frame)
    self.__initialize_stats_frame(control_frame)
    self.__initialize_help_frame(control_frame)
    # Initialize the Start / Stop button
    self.start_stop_button = Button(control_frame,
                                    text='START',
                                    bg='pale green',
                                    relief='flat',
                                    pady=20,
                                    command=self.__toggle_solver)
    self.start_stop_button.pack(side=BOTTOM, fill=X)
    # Interactive GUI components are disabled during reconfiguration
    self.__interactive_gui_components = [self.grid_width_slider,
                                         self.__reconfigure_button,
                                         self.__cb_diagonal,
                                         self.__cb_grid_lines,
                                         self.__how_to_use_button,
                                         self.__about_button,
                                         self.__import_button,
                                         self.__export_button,
                                         self.start_stop_button]
def __initialize_configuration_frame(self, master):
    ''' Initializes the configuration frame which is a child of the control frame.

    Args:
        master::[tk.Frame]
            The parent frame of this frame (the control frame)

    Returns:
        None
    '''
    # The configuration frame itself
    configuration_frame = Frame(master)
    configuration_frame.pack(anchor=W, padx=20, pady=20, fill=X)
    # Allow grid components to fill the width of the frame
    configuration_frame.grid_columnconfigure(0, weight=1)
    configuration_frame.grid_columnconfigure(1, weight=1)
    # Configuration label
    configuration_label = Label(
        configuration_frame, text='CONFIGURATION', font=('Helvetica', 16))
    configuration_label.grid(row=0, column=0, sticky=W, columnspan=2)
    # Grid width label (kept in sync with the slider by its handler)
    self.grid_width_label = Label(
        configuration_frame, text='Grid width: {}'.format(self.model.get_nrow()))
    self.grid_width_label.grid(row=1, column=0, sticky=W, columnspan=2)
    # Grid width slider
    self.grid_width_slider = Scale(configuration_frame,
                                   width=20,
                                   from_=self.__MIN_GRID_WIDTH,
                                   to=self.__MAX_GRID_WIDTH,
                                   orient=HORIZONTAL,
                                   showvalue=False)
    # Set default slider value
    self.grid_width_slider.set(self.model.get_nrow())
    # Slider bindings (update the label while dragging and on release)
    self.grid_width_slider.bind(
        '<B1-Motion>', self.__handle_grid_width_slider_change)
    self.grid_width_slider.bind(
        '<ButtonRelease-1>', self.__handle_grid_width_slider_change)
    self.grid_width_slider.grid(row=2, column=0, sticky=EW, columnspan=2)
    # Reconfigure button
    self.__reconfigure_button = Button(
        configuration_frame,
        text='Reconfigure',
        bg=self.__COLOUR_RECONFIGURE_BUTTON,
        command=self.__handle_reconfigure)
    self.__reconfigure_button.grid(row=3,
                                   column=0,
                                   sticky=EW,
                                   columnspan=2)
    # Progress bar shown in place of the button during reconfiguration
    self.__progress_bar = Progressbar(
        configuration_frame,
        orient=HORIZONTAL,
        mode='indeterminate')
def __initialize_options_frame(self, master):
    ''' Initializes the options frame which is a child of the control frame.

    Args:
        master::[tk.Frame]
            The parent frame of this frame (the control frame)

    Returns:
        None
    '''
    # The options frame itself
    options_frame = Frame(master)
    options_frame.pack(anchor=W, padx=20, pady=20, fill=X)
    # Allow grid components to fill the width of the frame
    options_frame.grid_columnconfigure(0, weight=1)
    # Options label
    options_label = Label(
        options_frame,
        text='OPTIONS',
        font=('Helvetica', 16))
    options_label.grid(row=0, column=0, sticky=W, columnspan=2)
    # Allow diagonal movement Checkbutton (mirrors the model setting)
    self.allow_diagonals = IntVar(
        value=self.model.get_setting('allowDiagonals'))
    self.cb_values['allowDiagonals'] = self.allow_diagonals
    self.__cb_diagonal = Checkbutton(
        options_frame,
        text='Allow diagonal movement',
        variable=self.allow_diagonals,
        command=self.__handle_cb)
    self.__cb_diagonal.grid(row=1, column=0, sticky=W)
    # Show grid lines Checkbutton (purely visual, not a model setting)
    self.show_grid_lines = IntVar(value=True)
    self.__cb_grid_lines = Checkbutton(
        options_frame,
        text='Show grid lines',
        variable=self.show_grid_lines,
        command=self.__handle_show_grid_lines)
    self.__cb_grid_lines.grid(row=2, column=0, sticky=W)
def __initialize_import_export_frame(self, master):
    ''' Initializes the Import / Export frame which is a child of the control frame.

    Args:
        master::[tk.Frame]
            The parent frame of this frame (the control frame)

    Returns:
        None
    '''
    # The Import / Export frame itself
    import_export_frame = Frame(master)
    import_export_frame.pack(anchor=W, padx=20, pady=20, fill=X)
    # Allow grid components to fill the width of the frame
    import_export_frame.grid_columnconfigure(0, weight=1)
    import_export_frame.grid_columnconfigure(1, weight=1)
    # Import / Export label
    import_export_label = Label(
        import_export_frame,
        text='IMPORT / EXPORT',
        font=('Helvetica', 16)
    )
    import_export_label.grid(row=0, column=0, sticky=W, columnspan=2)
    # Import button
    self.__import_button = Button(import_export_frame,
                                  text="Import Maze",
                                  bg=self.__COLOUR_IMPORT_MAZE_BUTTON,
                                  command=self.__handle_import)
    self.__import_button.grid(row=1, column=0, sticky=EW)
    # Export button
    self.__export_button = Button(import_export_frame,
                                  text="Export Maze",
                                  bg=self.__COLOUR_EXPORT_MAZE_BUTTON,
                                  command=self.__handle_export)
    self.__export_button.grid(row=1, column=1, sticky=EW)
def __initialize_stats_frame(self, master):
    ''' Initializes the stats frame which is a child of the control frame.

    Each row pairs a colour-coded static label with a dynamic StringVar
    label that the solver updates while running.

    Args:
        master::[tk.Frame]
            The parent frame of this frame (the control frame)

    Returns:
        None
    '''
    # The stats frame itself
    stats_frame = Frame(master)
    stats_frame.pack(anchor=W, padx=20, pady=20, fill=X)
    # Allow grid components to fill the width of the frame
    stats_frame.grid_columnconfigure(0, weight=1)
    stats_frame.grid_columnconfigure(1, weight=5)
    # Stats label
    stats_label = Label(
        stats_frame,
        text='STATS',
        font=('Helvetica', 16)
    )
    stats_label.grid(row=0, column=0, sticky=W, columnspan=2)
    # Unsolved label
    self.unsolved_label_var = StringVar()
    unsolved_static_label = Label(
        stats_frame,
        text='# Unsolved Nodes',
        bg=self.__COLOUR_UNSOLVED,
        anchor=W
    )
    unsolved_static_label.grid(row=1, column=0, sticky=EW)
    unsolved_dynamic_label = Label(
        stats_frame,
        textvariable=self.unsolved_label_var
    )
    unsolved_dynamic_label.grid(row=1, column=1, sticky=W)
    # Solved label
    self.solved_label_var = StringVar()
    solved_static_label = Label(
        stats_frame,
        text='# Solved Nodes',
        bg=self.__COLOUR_SOLVED,
        anchor=W
    )
    solved_static_label.grid(row=2, column=0, sticky=EW)
    solved_dynamic_label = Label(
        stats_frame,
        textvariable=self.solved_label_var
    )
    solved_dynamic_label.grid(row=2, column=1, sticky=W)
    # Path label
    self.path_label_var = StringVar()
    path_static_label = Label(
        stats_frame,
        text='# Path Nodes',
        bg=self.__COLOUR_PATH,
        anchor=W
    )
    path_static_label.grid(row=3, column=0, sticky=EW)
    path_dynamic_label = Label(
        stats_frame,
        textvariable=self.path_label_var
    )
    path_dynamic_label.grid(row=3, column=1, sticky=W)
    # Elapsed time label
    self.elapsed_label_var = StringVar()
    elapsed_time_static_label = Label(
        stats_frame,
        text='Elapsed Time (s)'
    )
    elapsed_time_static_label.grid(row=4, column=0, sticky=W)
    elapsed_time_dynamic_label = Label(
        stats_frame,
        textvariable=self.elapsed_label_var
    )
    elapsed_time_dynamic_label.grid(row=4, column=1, sticky=W)
def __initialize_help_frame(self, master):
    ''' Initializes the help frame which is a child of the control frame.

    Args:
        master::[tk.Frame]
            The parent frame of this frame (the control frame)

    Returns:
        None
    '''
    # The help frame itself
    help_frame = Frame(master)
    help_frame.pack(anchor=W, padx=20, pady=20, fill=X)
    # Allow grid components to fill the width of the frame
    help_frame.grid_columnconfigure(0, weight=1)
    help_frame.grid_columnconfigure(1, weight=1)
    # Help label
    help_label = Label(help_frame, text='HELP', font=('Helvetica', 16))
    help_label.grid(row=0, column=0, sticky=W, columnspan=2)
    # How to Use button (opens the canned 'help' dialog text)
    self.__how_to_use_button = Button(help_frame,
                                      text='How to Use',
                                      bg=self.__COLOUR_HOW_TO_USE_BUTTON,
                                      command=lambda title='How to Use', message=self.__DIALOG_MESSAGES['help']: self.__show_info_dialog(title, message))
    self.__how_to_use_button.grid(row=1, column=0, sticky=EW)
    # About button (opens the canned 'about' dialog text)
    self.__about_button = Button(help_frame,
                                 text='About',
                                 bg=self.__COLOUR_ABOUT_BUTTON,
                                 command=lambda title='About', message=self.__DIALOG_MESSAGES['about']: self.__show_info_dialog(title, message))
    self.__about_button.grid(row=1, column=1, sticky=EW)
def __initialize_grid_frame(self):
    ''' Initializes the grid frame: a square canvas that displays the maze.

    Also performs the initial full colouring of the grid via update_gui().

    Args:
        None

    Returns:
        None
    '''
    self.grid_frame = Frame(self,
                            height=self.__GRID_DIM_WIDTH,
                            width=self.__GRID_DIM_WIDTH,
                            highlightbackground='gray',
                            highlightthickness=3)
    # Create blank canvas
    self.canvas = Canvas(self.grid_frame,
                         height=self.__GRID_DIM_WIDTH,
                         width=self.__GRID_DIM_WIDTH,
                         bg='white')
    self.canvas.pack()
    self.grid_frame.pack(side=LEFT)
    # Colour the entire grid initially (maps (x, y) -> canvas rectangle id)
    self.__POS_TO_SQUARE = {}
    all_indices = [(x, y) for x in range(self.model.get_nrow())
                   for y in range(self.model.get_ncol())]
    self.update_gui(maze=self.model.get_curr_maze(),
                    diff_positions=all_indices,
                    is_rapid_config=False)
'''
GUI HANDLERS.
'''
def __handle_reconfigure(self, is_importing=False, loaded_maze=None):
    """Rebuild the model and grid at the slider width (or from an import).

    The heavy rebuild runs on a worker thread so the indeterminate
    progress bar stays responsive; interactive widgets are disabled
    for the duration.

    Args:
        is_importing: True when triggered by a maze import.
        loaded_maze: imported maze dict (only used when is_importing).

    Returns:
        None
    """
    def reconfiguration_thread():
        # Disable mouse and keyboard events
        self.__is_reconfiguring = True
        # Change the grid width slider before the GUI is disabled
        if is_importing:
            # Move the grid width slider to the new grid width
            self.grid_width_slider.set(loaded_maze['gridWidth'])
            # Update the grid width label
            self.grid_width_label.configure(
                text='Grid width: {}'.format(self.grid_width_slider.get()))
        # Disable interactive GUI components
        self.__disable_gui()
        # Set the new grid width to the one in the imported file if importing, otherwise use the slider value
        print('{}...'.format(
            'Importing maze' if is_importing else 'Reconfiguring'))
        new_width = loaded_maze['gridWidth'] if is_importing else int(
            self.grid_width_slider.get())
        # Replace the Reconfigure button with the progress bar
        self.__reconfigure_button.grid_remove()
        self.__progress_bar.grid(row=3, column=0, sticky=EW, columnspan=2)
        self.__progress_bar.start()
        # The long reconfiguration task
        reconfigure(new_width=new_width,
                    is_importing=is_importing,
                    loaded_maze=loaded_maze)
        # Replace the progress bar with the Reconfigure button
        self.__progress_bar.stop()
        self.__progress_bar.grid_forget()
        self.__reconfigure_button.grid(
            row=3, column=0, sticky=EW, columnspan=2)
        # Re-enable mouse and keyboard events
        self.__is_reconfiguring = False
        # Re-enable interactive GUI components
        self.__enable_gui()
        print('{} complete!'.format(
            'Import' if is_importing
            else 'Reconfiguration'))
        # Show success dialog
        self.__show_info_dialog(
            title='{} Complete'.format(
                'Import' if is_importing else 'Reconfiguration'),
            message='Successfully {} a {} x {} maze!'.format('imported' if is_importing else 'configured',
                                                             self.model.get_nrow(),
                                                             self.model.get_ncol()))

    def reconfigure(new_width, is_importing, loaded_maze):
        # Recreate the backing model and redraw the GUI
        self.__initialize_model(new_width, new_width)
        self.__POS_TO_SQUARE = {}
        self.canvas.delete('all')
        # Colour the entire grid initially
        all_indices = [(x, y) for x in range(self.model.get_nrow())
                       for y in range(self.model.get_ncol())]
        self.update_gui(maze=self.model.get_curr_maze(),
                        diff_positions=all_indices,
                        is_rapid_config=False)
        # Update model with the imported data if the reconfiguration was triggered by an import
        if is_importing and loaded_maze is not None:
            self.model.import_maze_data(loaded_maze)

    # Make sure that the solver is stopped
    if not self.model.is_solving():
        # Display confirmation dialog
        is_reconfiguring = messagebox.askyesno(title='Reconfigure',
                                               message=('Are you sure you want to reconfigure?\n'
                                                        'All walls will be erased.'),
                                               icon='warning')
        # BUG FIX: askyesno returns a bool, while tkinter's YES constant is
        # the string 'yes'; `is_reconfiguring == YES` was therefore always
        # False and reconfiguration could never start. Test the bool itself.
        if is_reconfiguring:
            threading.Thread(target=reconfiguration_thread).start()
    else:
        messagebox.showerror(title='Failed to Reconfigure',
                             message='Cannot reconfigure while the solver is running.')
def __handle_grid_width_slider_change(self, event):
    """Keep the grid-width label in sync with the slider position."""
    new_width = self.grid_width_slider.get()
    self.grid_width_label.configure(text='Grid width: {}'.format(new_width))
def __handle_cb(self):
    """Push every checkbox value from the GUI into the model's settings."""
    for setting_name, int_var in self.cb_values.items():
        self.model.set_setting(setting_name, bool(int_var.get()))
def __handle_show_grid_lines(self):
    """Show or hide grid lines by recolouring every square's outline."""
    # An empty outline colour hides the line entirely.
    colour = 'gray' if self.show_grid_lines.get() else ''
    for square in self.__POS_TO_SQUARE.values():
        self.canvas.itemconfig(square, outline=colour)
def __handle_import(self):
    """Let the user pick a maze JSON file and reconfigure the grid from it.

    Does nothing while the solver is running or when the dialog is
    cancelled; incompatible files are reported on the console.
    """
    if not self.model.is_solving():
        # Display load file dialog
        filename = filedialog.askopenfilename(parent=self,
                                              title='Import Maze',
                                              initialdir=os.getcwd() + '/sample_mazes')
        if filename != '':
            try:
                # Parse the maze JSON straight from the file (the original
                # `json.loads(readlines()[0])` broke on multi-line JSON).
                with open(filename, 'r') as file:
                    loaded_maze = json.load(file)
                # Reconfigure the maze using the imported data
                self.__handle_reconfigure(
                    is_importing=True, loaded_maze=loaded_maze)
            # BUG FIX: the original bare `except:` also swallowed
            # programming errors; catch only expected failure modes.
            except (OSError, json.JSONDecodeError, KeyError, TypeError):
                print('Failed to import maze from {}: Incompatible or corrupted file'.format(
                    filename))
def __handle_export(self):
    """Save the current maze configuration to a user-chosen JSON file."""
    if self.model.is_solving():
        return
    # Display save file dialog
    filename = filedialog.asksaveasfilename(parent=self,
                                            title='Export Maze',
                                            initialfile='my_astar_maze',
                                            defaultextension='.json',
                                            filetypes=[('JSON', '*.json')])
    # Snapshot of the current maze configuration
    curr_maze = {
        'gridWidth': self.model.get_nrow(),
        'start': self.model.get_start(),
        'end': self.model.get_end(),
        'walls': list(self.model.get_walls())
    }
    # Write to the file
    if filename != '':
        with open(filename, 'w') as file:
            file.write(json.dumps(curr_maze))
        print('Successfully exported maze configuration to {}.'.format(filename))
'''
EVENT HANDLERS.
'''
def __on_m1_down(self, event):
    # Left mouse button: place walls / set start or end node.
    self.__handle_mouse_down(event, True)

def __on_m3_down(self, event):
    # Right mouse button: erase walls.
    self.__handle_mouse_down(event, False)
def __handle_mouse_down(self, event, is_setting_wall):
    """Translate a canvas mouse event into a model edit: set the start
    node, set the end node, or place/remove a wall, depending on which
    edit-mode key is currently held.

    Args:
        event: the tkinter mouse event.
        is_setting_wall: True places a wall, False removes one.
    """
    def is_event_pos_valid():
        # The click must fall inside the square grid area.
        return 0 <= event.x <= self.__GRID_DIM_WIDTH and 0 <= event.y <= self.__GRID_DIM_WIDTH

    def calculate_square_pos():
        # Convert pixel coordinates into grid indices.
        square_pos_x = int(event.x // self.__SQUARE_WIDTH)
        square_pos_y = int(event.y // self.__SQUARE_WIDTH)
        return (square_pos_x, square_pos_y)

    # Validate that the solver is stopped, the GUI is not reconfiguring, and the position is good
    if (not self.model.is_solving()
            and not self.__is_reconfiguring
            and event.widget == self.canvas
            and is_event_pos_valid()):
        square_pos = calculate_square_pos()
        if self.__EDIT_MODE['setStart']:
            self.model.set_start(square_pos)
        elif self.__EDIT_MODE['setEnd']:
            self.model.set_end(square_pos)
        else:
            self.model.set_wall(square_pos, is_setting_wall)
def __on_key_press(self, event):
    ''' Handles keyboard events.
        [space] - Start / stop the solver
        [Esc] - Quit the application
        [S] - Toggle set start node
        [E] - Toggle set end node

    Args:
        event::[tkinter.Event]
            The keyboard event.

    Returns:
        None
    '''
    key_code = event.keysym
    # Only handle keyboard events when the GUI is not reconfiguring
    if not self.__is_reconfiguring:
        # Press [space] to start / stop the solver
        if key_code == 'space':
            self.__toggle_solver()
        # Press [Esc] to quit the application
        elif key_code == 'Escape':
            if not self.model.is_solving():
                # Display confirmation dialog
                is_quitting = messagebox.askyesno(title='Exit Application',
                                                  message='Are you sure you want to exit the application?',
                                                  icon='warning')
                # BUG FIX: askyesno returns a bool; comparing it against
                # tkinter's YES constant (the string 'yes') was always
                # False, so [Esc] could never actually close the app.
                if is_quitting:
                    print('Closing application...')
                    self.destroy()
            else:
                print(
                    'Cannot close the application while the solver is running.')
                messagebox.showerror(title='Failed to Close Application',
                                     message='Cannot close the application while the solver is running.')
        # Edit mode toggles
        elif key_code == 's':
            self.__EDIT_MODE['setStart'] = True
        elif key_code == 'e':
            self.__EDIT_MODE['setEnd'] = True
def __on_key_release(self, event):
    """Key-release handler: releasing S / E leaves the matching edit mode."""
    mode_for_key = {'s': 'setStart', 'e': 'setEnd'}
    mode = mode_for_key.get(event.keysym)
    if mode is not None:
        self.__EDIT_MODE[mode] = False
def __handle_wall_click(self, pos):
    """Place a wall at grid position *pos*.

    NOTE(review): no caller is visible in this part of the file -- confirm
    whether this helper is still used.
    """
    self.model.set_wall(pos, True)
'''
UTILITY METHODS.
'''
def __toggle_solver(self):
    """Start the solver if it is idle, otherwise stop it and re-enable the GUI."""
    if self.model.is_solving():
        print('Solver stopped.')
        self.__enable_gui()
        self.model.stop_solving()
        return
    # Lock the UI for the duration of the run, but keep the Start/Stop
    # button and the grid-lines checkbutton usable.
    self.__disable_gui()
    self.start_stop_button.configure(state=NORMAL)
    self.__cb_grid_lines.configure(state=NORMAL)
    self.model.solve()
def __disable_gui(self):
    """Disable every registered interactive widget."""
    for component in self.__interactive_gui_components:
        component.configure(state=DISABLED)
def __enable_gui(self):
    """Re-enable every registered interactive widget."""
    for component in self.__interactive_gui_components:
        component.configure(state=NORMAL)
def __show_info_dialog(self, title, message):
    """Show a modal informational message box with the given title and message."""
    messagebox.showinfo(title, message)
def __show_error_dialog(self, title, message):
    """Show a modal error message box with the given title and message."""
    messagebox.showerror(title, message)
'''
UPDATE METHOD.
'''
def __calculate_square_width(self):
    """Recompute the pixel width of one grid square from the model's row count.

    Float division: the square width may be fractional so the grid always
    fills the fixed canvas dimension exactly.
    """
    self.__SQUARE_WIDTH = self.__GRID_DIM_WIDTH / self.model.get_nrow()
def update_gui(self, maze, diff_positions, is_rapid_config):
    ''' Updates the GUI by colouring the grid squares listed in diff_positions
    according to the maze symbols, and refreshes the stats frame.
    Args:
        maze::[list[list]]
            A 2D array containing symbols that represent the maze
        diff_positions::[list]
            A list containing the positions of nodes that have changed since
            the previous update
        is_rapid_config::[bool]
            True when the update comes from rapid wall editing; selects the
            lighter-weight Tk update call below.
    Returns:
        None
    '''
    # Configure the Start / Stop button to display the appropriate text and colour
    if self.model.is_solving():
        self.start_stop_button.configure(text='STOP', bg='salmon')
    elif not self.__is_reconfiguring:
        # Re-enable the GUI if the solver is done
        self.__enable_gui()
        self.start_stop_button.configure(text='START', bg='pale green')
    # Update the square width based on the current number of rows in the model
    self.__calculate_square_width()
    # Grid lines are drawn as square outlines; '' means no outline.
    outline_colour = 'gray' if self.show_grid_lines.get() else ''
    # Update grid by colouring the appropriate square
    for (x, y) in diff_positions:
        # Create a square at (x, y) lazily if it does not yet exist
        if (x, y) not in self.__POS_TO_SQUARE:
            square = self.canvas.create_rectangle(x * self.__SQUARE_WIDTH,
                                                  y * self.__SQUARE_WIDTH,
                                                  (x + 1) *
                                                  self.__SQUARE_WIDTH,
                                                  (y + 1) *
                                                  self.__SQUARE_WIDTH,
                                                  fill=self.__SYMBOL_TO_COLOUR[maze[x][y]],
                                                  outline=outline_colour,
                                                  tag='to-delete')
            self.__POS_TO_SQUARE[(x, y)] = square
        # Otherwise just recolour the existing canvas item
        else:
            self.canvas.itemconfig(self.__POS_TO_SQUARE[(
                x, y)], fill=self.__SYMBOL_TO_COLOUR[maze[x][y]])
    # Refresh the stats labels from the model's counters
    self.unsolved_label_var.set(str(self.model.get_stat('numUnsolved')))
    self.solved_label_var.set(str(self.model.get_stat('numSolved')))
    self.path_label_var.set(str(self.model.get_stat('numPath')))
    self.elapsed_label_var.set(str(self.model.get_stat('elapsedTime')))
    # Handle GUI updates differently if the update is caused by a wall config or not
    if is_rapid_config:
        # update_idletasks() prevents fatal crashes when setting / removing nodes rapidly
        self.update_idletasks()
    else:
        # update() allows the user to stop the solver and prevents calls to model.solve() from queueing
        self.update()
def main():
    """Create the application window and enter the Tk main loop."""
    print('Starting application...')
    app = AStarView()
    app.mainloop()
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| Mackaronii/astar-search-visualizer | astar_gui.py | astar_gui.py | py | 31,617 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tkinter.Tk",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "tkinter.Tk.__init__",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "astar_model.AStarModel",
... |
32542356510 | from google.cloud import storage
from configparser import ConfigParser
from google.oauth2 import service_account
from googleapiclient.discovery import build
from utils.demo_io import (
    get_initial_slide_df_with_predictions_only,
    get_fovs_df,
    get_top_level_dirs,
    populate_slide_rows,
    get_histogram_df,
    list_blobs_with_prefix,
)
import polars as pl
from gcsfs import GCSFileSystem
from PIL import Image
import asyncio
from utils.zarr_utils import parse_slide, get_image_from_zarr

# Parse in key and bucket name from config file.
# config.ini is expected to hold a [GCS] section with the service-account
# key path and the gs:// bucket URL.
cfp = ConfigParser()
cfp.read("config.ini")
service_account_key_json = cfp["GCS"]["gcs_storage_key"]
gs_url = cfp["GCS"]["bucket_url"]
bucket_name = gs_url.replace("gs://", "")
bucket2_name = "octopi-malaria-data-processing"
# Zipped zarr archive holding the spot-image stack for one acquisition run.
zipzarr_url = "octopi-malaria-data-processing/072622-D1-3_2022-07-26_17-50-42.852998/version1/spot_images.zip"
# Define GCS file system so files can be read
gcs = GCSFileSystem(token=service_account_key_json)
# Authenticate using the service account key file
credentials = service_account.Credentials.from_service_account_file(
    service_account_key_json, scopes=["https://www.googleapis.com/auth/cloud-platform"]
)
client = storage.Client.from_service_account_json(service_account_key_json)
# Create a storage client
storage_service = build("storage", "v1", credentials=credentials)
# Open the zipped zarr store and display 25 consecutive spot images
# starting at index 240; each record's "compose" entry is the rendered array.
spot_img_zarr = parse_slide(gcs, zipzarr_url)
for i in range(25):
    spot_img = Image.fromarray(get_image_from_zarr(spot_img_zarr, 240 + i)["compose"])
    spot_img.show()
| alice-gottlieb/nautilus-dashboard | examples/zarr_example.py | zarr_example.py | py | 1,548 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "gcsfs.GCSFileSystem",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "google.oauth2.service_account.Credentials.from_service_account_file",
"line_number": 39,
"u... |
74157726824 | import psycopg2
import src.DBConfig as config
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
class Database:
    """Persistence layer for the bug tracker.

    Wraps psycopg2 connections to a PostgreSQL server and provides helpers
    to create the bug database and table, and to insert, update and
    retrieve bug records. Errors are written to the supplied debug log
    object rather than raised to the caller.
    """

    def __init__(self, debugLogFile):
        # Connection/cursor to the PostgreSQL *server* (not the bug database).
        self.db_server_conn = None
        self.database_params = None
        self.db_cursor = None
        self.db_name = None
        self.bug_table_name = None
        # Object exposing writeToFile(str), used for error reporting.
        self.debugLogFile = debugLogFile

    # Open a connection to the host server that the database is stored on.
    def connect(self, server_params):
        """
        Connects to the postgres server that has the bug tracker database on it.
        If an invalid key is provided then a connection will not be made.
        :param server_params: config file key
        :return:
        """
        try:
            # Get the server params from Database.ini
            params = config.config(server_params)
            # Create a connection to the posgresql database server
            self.db_server_conn = psycopg2.connect(**params)
            self.db_cursor = self.db_server_conn.cursor()
        except (Exception, psycopg2.Error) as e:
            self.debugLogFile.writeToFile("Error while connecting to PostgreSQL " + str(e))

    # Disconnect is its own function so that the connection to the
    # databases server can remain open while the database and table are being
    # queried.
    def disconnect(self):
        """Close the server cursor and connection opened by connect(), if any."""
        if self.db_server_conn:
            self.db_cursor.close()
            self.db_server_conn.close()
            print("PostgreSQL connection is closed")

    def create_database(self, database_params):
        """Create the bug database named in the given config section.

        Requires connect() to have been called first; no-op otherwise.
        """
        if self.db_server_conn:
            try:
                self.database_params = config.config(database_params)
                # CREATE DATABASE cannot run inside a transaction block.
                self.db_server_conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
                database_name = self.database_params.get('database')
                sqlCreateDatabase = "create database " + database_name + ""
                self.db_cursor.execute(sqlCreateDatabase)
            except (Exception, psycopg2.Error) as e:
                self.debugLogFile.writeToFile("Could not create Database " + str(e))

    def create_table(self, table_name):
        """Create the bug table *table_name* inside the bug database.

        The table name is remembered for the insert/update/retrieve helpers.
        """
        # need to connect to the database that I am actually creating the table in.
        conn = None
        cursor = None
        self.bug_table_name = table_name
        try:
            # connect to the database on the postgres server.
            conn = psycopg2.connect(**self.database_params)
            # once we are connected to the database that we created on the postgreSQL server
            # we can create a table in that database.
            cursor = conn.cursor()
            # basic Table; the bug title acts as the primary key.
            table = ("CREATE TABLE " + table_name + """ (
                Title text NULL,
                Traceback_info text NULL,
                Resolved BOOL NULL,
                Version text NULL,
                PRIMARY KEY (Title)
            ); """)
            # execute the sql query
            cursor.execute(table)
            # commit the changes so they are saved
            conn.commit()
        except (Exception, psycopg2.Error) as e:
            self.debugLogFile.writeToFile("Could not create table " + str(e))
        finally:
            # close the connection to the database to prevent memory leaks
            if conn:
                cursor.close()
                conn.close()

    def list_insert(self, bugRecordDTO):
        """Insert one bug record (title, traceback, resolved flag, version)."""
        conn = None
        cursor = None
        try:
            conn = psycopg2.connect(**self.database_params)
            cursor = conn.cursor()
            insert_query = (" INSERT INTO " + self.bug_table_name + """(
                Title,
                Traceback_info,
                Resolved,
                Version)
                VALUES (%s, %s, %s, %s); """)
            cursor.execute(insert_query,
                           (bugRecordDTO.title,
                            bugRecordDTO.tracebackInfo, bugRecordDTO.resolved, bugRecordDTO.version))
            conn.commit()
        except (Exception, psycopg2.Error) as e:
            self.debugLogFile.writeToFile("Could not insert record into table " + str(e))
        finally:
            if conn:
                cursor.close()
                conn.close()

    # update item in table
    # the title of the bug is the primary key. To update a specific record enter the title of the bug.
    def update_record(self, title):
        """Mark the unresolved bug with the given title as resolved."""
        conn = None
        cursor = None
        try:
            conn = psycopg2.connect(**self.database_params)
            cursor = conn.cursor()
            # When a bug is updated it means that it has been resolved so the bug_resolve flag is switched
            # from false to true.
            sql_update_query = (" UPDATE " + self.bug_table_name + """
                SET Resolved = True
                WHERE Resolved = %s
                AND Title = %s """)
            update_values = (False, title)
            cursor.execute(sql_update_query, update_values)
            conn.commit()
            count = cursor.rowcount
            print(count, "Record updated successfully")
        except (Exception, psycopg2.Error) as e:
            self.debugLogFile.writeToFile("Could not update record in table " + str(e))
        finally:
            if conn:
                cursor.close()
                conn.close()

    # Retrieves all records with the same bug_title, bug_title is the primary key.
    def retrieve_record(self, title):
        """Fetch all rows whose Title equals *title*.

        Returns a list of row tuples, or None if the connection or query
        failed.
        """
        conn = None
        cursor = None
        # BUG FIX: initialise the result so the return below cannot raise
        # UnboundLocalError when connect()/execute() fails before fetchall().
        table_record = None
        try:
            conn = psycopg2.connect(**self.database_params)
            cursor = conn.cursor()
            sql_retrieve_query = ("SELECT * from " + self.bug_table_name + """
                WHERE Title = %s """)
            # record_name has to be a tuple or list to convert properly to execute the query.
            sql_query_values = (title,)
            cursor.execute(sql_retrieve_query, sql_query_values)
            # retrieves all the records that were queried.
            table_record = cursor.fetchall()
        except (Exception, psycopg2.Error) as e:
            self.debugLogFile.writeToFile("Could not retrieve record " + str(e))
        finally:
            if conn:
                cursor.close()
                conn.close()
        return table_record
{
"api_name": "src.DBConfig.config",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "src.DBConfig",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "psycopg2.connect",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "psycopg2.Error",
... |
16230419939 | import PyPDF2
import warnings  # NOTE(review): imported but never used in this script
# Open the PDF file in read-binary mode
pdf_file = open('1.2.826.1.3680043.9.5282.150415.2352.16502352212057.pdf', 'rb')
pdf_reader = PyPDF2.PdfReader(pdf_file)
num_pages = len(pdf_reader.pages)
# Concatenate the extracted text of every page into one string.
text = ""
for page_number in range(num_pages):
    page = pdf_reader.pages[page_number]
    page_text = page.extract_text()
    text += page_text
# print(text)
# print(type(text))
import re
# text = "Name MR.JITENDRA BHUPTANI 383669 Patient ID AS_BOR_CT_2746 Accession No 164_02746_212057 Age/Gender 80Y / Male Referred By Dr.ANAND S. SHENAI MS Date 3-Oct-2021"
# Regexes for the fixed header layout of the radiology report (see sample above).
name_regex = r"Name\s+([A-Z\. ]+)"
patient_id_regex = r"Patient\s+ID\s+([\w-]+)"
accession_no_regex = r"Accession\s+No\s+([\d_]+)"
age_gender_regex = r"Age\/Gender\s+([\d]+[A-Za-z]*)\s+\/\s+([A-Za-z]+)"
date_regex = r"Date\s+(\d+-[A-Za-z]{3}-\d+)"
# NOTE(review): re.search() returns None when a field is missing, which would
# raise AttributeError on .group() -- confirm the reports always match.
name = re.search(name_regex, text).group(1)
patient_id = re.search(patient_id_regex, text).group(1)
accession_no = re.search(accession_no_regex, text).group(1)
age, gender = re.search(age_gender_regex, text).groups()
date = re.search(date_regex, text).group(1)
print(f"Name: {name}")
print(f"Patient ID: {patient_id}")
print(f"Accession No: {accession_no}")
print(f"Age: {age}")
print(f"Gender: {gender}")
print(f"Date: {date}")
| Muthukumar4796/Text-recognition-in-radiology-report-using-NLP- | nlp_project.py | nlp_project.py | py | 1,325 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyPDF2.PdfReader",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 31... |
38509693865 | import math
from django.shortcuts import render, get_object_or_404, redirect, reverse
from django.utils import timezone
from authentication.models import UserProfile
from issue_tracker.models import Issue, Comment
from django.contrib.auth.models import User
from issue_tracker.forms import IssueForm, CommentForm
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Create your views here.
def get_all_issues(request):
    """
    Return all issues published up to now, ordered by vote count
    (most-voted first), paginated nine per page, rendered with the
    issues.html template.
    """
    all_issues = Issue.objects.filter(published_date__lte=timezone.now()).order_by("-total_votes")
    comments = Comment.objects.filter()
    paginator = Paginator(all_issues, 9)
    page = request.GET.get('page-issues')
    try:
        all_issues = paginator.page(page)
    except PageNotAnInteger:
        # Missing/invalid page number in the query string: show page one.
        all_issues = paginator.page(1)
    except EmptyPage:
        # Page number beyond the last page: clamp to the last page.
        all_issues = paginator.page(paginator.num_pages)
    # BUG FIX: removed a stray `paginator.page(paginator.num_pages)` call that
    # ran on every request with its result discarded.
    return render(request, "issues.html", {"all_issues": all_issues,
                                           "comments": comments,
                                           })
def single_issue(request, pk):
    """
    Render one issue (looked up by pk, 404 if absent) with its comments.

    On a valid POST, create the comment, bump the issue's denormalised
    comment counter and redirect back here (post/redirect/get).
    """
    issue = get_object_or_404(Issue, pk=pk)
    comments = Comment.objects.filter(issue=issue).order_by("-id")
    if request.method == "POST":
        comment_form = CommentForm(request.POST)
        if comment_form.is_valid():
            content = request.POST.get('content')
            comment = Comment.objects.create(issue=issue,
                                             user_logged_in=request.user,
                                             content=content)
            comment.save()
            # Keep the cached comment count on the issue in sync.
            issue.comment_number += 1
            issue.save()
            return redirect(single_issue, issue.pk)
    else:
        comment_form = CommentForm()
    # Look up the author's profile image, if any.
    author_image = []
    try:
        image = UserProfile.objects.get(user=issue.author).image
        author_image.append({"image": image})
    # NOTE(review): bare except also hides unrelated errors; narrowing to
    # UserProfile.DoesNotExist looks intended -- confirm before changing.
    except:
        author_image.append({"image": None})
    # Pair every comment with its author's profile image (None if missing).
    comments_with_images = []
    for comment in comments:
        try:
            image = UserProfile.objects.get(user=comment.user_logged_in).image
            comments_with_images.append({"image": image, "comment": comment})
        # NOTE(review): same bare-except concern as above.
        except:
            comments_with_images.append({"image": None, "comment": comment})
    comment_count = comments.count()
    first_three_comments = comments_with_images[:3]
    return render(request, "viewissue.html", {"author_image": author_image,
                                              "issue": issue,
                                              "comments_with_images": comments_with_images,
                                              "comment_count": comment_count,
                                              "comment_form": comment_form,
                                              "first_three_comments": first_three_comments,
                                              })
def create_or_edit_issue(request, pk=None):
    """
    Create a new issue (pk is None) or edit an existing one (pk given).

    GET renders an empty/prefilled form; a valid POST saves the issue,
    stamps the current user as author and redirects to the detail view.
    """
    issue = get_object_or_404(Issue, pk=pk) if pk else None
    if request.method != "POST":
        # Plain page load: show an unbound (or instance-prefilled) form.
        form = IssueForm(instance=issue)
        return render(request, "issueform.html", {"form": form})
    form = IssueForm(request.POST, request.FILES, instance=issue)
    if form.is_valid():
        issue = form.save()
        issue.author = request.user
        issue.save()
        return redirect(single_issue, issue.pk)
    # Invalid submission: re-render with the bound form and its errors.
    return render(request, "issueform.html", {"form": form})
def delete_issue(request, pk):
    """
    Delete the issue with the given pk and redirect to the issue list.

    NOTE(review): no authentication or ownership check is performed here,
    despite the comment below describing an author-only action -- confirm
    whether a `request.user == issue.author` guard should be added.
    """
    # author of issue can delete their posted issue
    issue = get_object_or_404(Issue, pk=pk)
    issue.delete()
    return redirect(get_all_issues)
@csrf_exempt
def vote(request, pk):
    """
    Toggle the session user's vote on an issue and return the new state as
    JSON: {"up_vote": bool, "vote_number": int}.

    NOTE(review): `is_authenticated` is *called* here, which only works on
    Django versions where it was a method -- confirm the project's Django
    version. Also, for anonymous users `up_vote` is never assigned and the
    dict construction below would raise NameError -- verify this endpoint
    is only reachable when logged in.
    """
    user = request.user
    issue = get_object_or_404(Issue, pk=pk)
    vote_number = issue.total_votes
    if user.is_authenticated():
        if user in issue.vote.all():
            # Clicking again removes the existing vote.
            issue.vote.remove(user)
            up_vote = False
            vote_number -= 1
        else:
            issue.vote.add(user)
            up_vote = True
            vote_number += 1
        # Persist the denormalised total from the authoritative M2M count.
        issue.total_votes = issue.vote.count()
        issue.save()
    data = {
        "up_vote": up_vote,
        "vote_number": vote_number
    }
    return JsonResponse(data)
@csrf_exempt
def done(request, pk):
    """
    Set or clear an issue's `done` flag from the POSTed 'is_done' value
    (compared as the string "true") and return state as JSON.

    Note: the returned {"is_done": ...} is captured *before* the update,
    so the response reflects the issue's previous state.
    """
    issue = get_object_or_404(Issue, pk=pk)
    data = {
        "is_done": issue.done
    }
    is_done = request.POST.get('is_done')
    if request.method == "POST":
        if str(is_done) == "true":
            issue.done = True
            issue.save()
        else:
            issue.done = False
            issue.save()
    return JsonResponse(data)
| itoulou/unicorn-attractor | issue_tracker/views.py | views.py | py | 5,517 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "issue_tracker.models.Issue.objects.filter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "issue_tracker.models.Issue.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "issue_tracker.models.Issue",
"line_number": 21,
"usage_... |
18914571723 | import pytest
from src.can_place_flowers import Solution
# LeetCode 605 "Can Place Flowers": hand-checked cases, including the
# single-empty-plot edge case ([0], 1).
@pytest.mark.parametrize(
    "flowerbed,n,expected",
    (
        ([1, 0, 0, 0, 1], 1, True),
        ([1, 0, 0, 0, 1], 2, False),
        ([1, 0, 0, 0, 0, 1], 2, False),
        ([0], 1, True),
    ),
)
def test_solution(flowerbed, n, expected):
    # `is` also asserts the method returns a real bool, not a truthy value.
    assert Solution().canPlaceFlowers(flowerbed, n) is expected
| lancelote/leetcode | tests/test_can_place_flowers.py | test_can_place_flowers.py | py | 373 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "src.can_place_flowers.Solution",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 6,
"usage_type": "attribute"
}
] |
5100700802 | #!/bin/env python
import streamlit as st
import pandas as pd
import os
import yaml
################################################################
def _main():
    """Streamlit page comparing EPFL courses with their matched courses at a
    selected partner university, highlighting year/semester/ECTS mismatches."""
    # Candidate datasets: every *.json in the CWD except the EPFL reference
    # and the generated Master_* / match_* artefacts.
    _filenames = os.listdir()
    _filenames = [e for e in _filenames if os.path.splitext(e)[1] == '.json']
    _filenames = [os.path.splitext(e)[0] for e in _filenames]
    # NOTE(review): the extension was already stripped above, so this second
    # splitext is a no-op; the line effectively only applies the filter.
    _filenames = [os.path.splitext(e)[0]
                  for e in _filenames if (not e.startswith('Master_')
                                          and not e.startswith('match_'))]
    _filenames.remove('EPFL')
    option = st.selectbox(
        "Select the university", _filenames, key="unisersity_selector_stat")
    # match_<university>.json is produced by the matching tab; bail out with
    # a hint if it has not been generated yet.
    try:
        with open(f'match_{option}.json', "r") as f:
            courses_matches = yaml.safe_load(f.read())
    except Exception:
        st.error("Matching tab should be executed once at least")
        return
    df = pd.read_json(option+'.json')
    df_epfl = pd.read_json('EPFL.json')
    # Group the (EPFL class, partner class) pairs by EPFL year then semester.
    sorted_match = {}
    for epfl_title, other_title in courses_matches.items():
        _sel = df_epfl[df_epfl["Course Title"] == epfl_title]
        if _sel.shape[0] == 0:
            st.write(f"cannot find class: {epfl_title}")
            continue
        epfl_class = _sel.iloc[0]
        epfl_year = epfl_class['Year']
        epfl_semester = epfl_class['Semester']
        # NOTE(review): assumes the partner dataframe always contains the
        # matched title; iloc[0] raises IndexError otherwise -- confirm.
        other_class = df[df["Course Title"] == other_title].iloc[0]
        if epfl_year not in sorted_match:
            sorted_match[epfl_year] = {}
        if epfl_semester not in sorted_match[epfl_year]:
            sorted_match[epfl_year][epfl_semester] = []
        sorted_match[epfl_year][epfl_semester].append(
            (epfl_class, other_class))

    def plot_class(layout, title, year, semester, ects):
        # Render one course line into the given Streamlit layout/column.
        layout.markdown(f"**{title}**, Year{year}-{semester} ({ects} ECTS)")
    hide = st.checkbox("Show only mismatches")

    def are_classes_different(classes):
        # Show a matched pair side by side; mismatching fields are wrapped in
        # :red[...] markup, and fully-matching pairs are skipped when *hide*.
        cols = st.columns(len(classes))
        ref_class = classes[0]
        ref_title = ref_class['Course Title']
        ref_year = ref_class['Year']
        ref_semester = ref_class['Semester']
        ref_ects = ref_class['ECTS']
        for c, col in zip(classes[1:], cols[1:]):
            title = c['Course Title']
            year = c['Year']
            semester = c['Semester']
            ects = c['ECTS']
            if year != ref_year:
                year = f":red[{year}]"
            if semester != ref_semester:
                semester = f":red[{semester}]"
            if ects != ref_ects:
                ects = f":red[{ects}]"
            if year == ref_year and semester == ref_semester and ects == ref_ects and hide:
                return
            plot_class(col, option + " " + title, year, semester, ects)
        plot_class(cols[0], 'EPFL-' + ref_title,
                   ref_year, ref_semester, ref_ects)
        with st.expander('description', expanded=False):
            cols = st.columns(len(classes))
            for c, col in zip(classes[:], cols[:]):
                col.markdown('- ' + '\n - '.join(c['Description']))
    # Emit one section per EPFL year/semester, one row per matched pair.
    for year, semesters in sorted_match.items():
        for semester, matches in semesters.items():
            st.markdown(f'### Year {year} - {semester} Semester')
            for classes in matches:
                are_classes_different(classes)
            st.markdown('---')
def main():
    """Entry point: run the page body behind a Streamlit loading spinner."""
    with st.spinner('Loading'):
        _main()
| anciaux/graph-tool-sgc-tc | plot_best_match.py | plot_best_match.py | py | 3,429 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line... |
34925500139 | import discord
from discord.ext import commands
from discord import Embed
from core.classes import *
class Global(CategoryExtension):
    """Globally available bot commands: ping, about, custom help and staff list."""

    @commands.command()
    @commands.has_permissions(manage_roles=True)
    async def ping(self, ctx):
        # Round-trip latency check, restricted to members who can manage roles.
        await ctx.send(f"Hey, it's me, HarTex! :eyes: Did you need something? - `{str(round(self.bot.latency * 1000))} ms`")

    @commands.command()
    async def about(self, ctx):
        # General bot information plus the count of servers the bot is in.
        about_me = discord.Embed(colour=0xa6f7ff)
        about_me.set_author(name="HarTex", icon_url="https://media.discordapp.net/attachments/582782144099778560/680318768907419668/your_profile_picture.jpg?width=450&height=450")
        about_me.description = "HarTex is a bot meant for servers with advanced moderation needs, run by Harry.#2450 and maintained by the HarTex Development Team."
        about_me.add_field(name="Whitelisted Servers", value=str(len(self.bot.guilds)), inline=False)
        await ctx.send(embed=about_me)

    @commands.command()
    async def help(self, ctx, command=None):
        """
        .help
        A custom help command.

        With no argument, lists the common moderation commands; with a
        command name, shows that command's usage and description.
        """
        if command is None:
            # Overview embed listing every documented moderation command.
            helpEmbed = Embed(title="HarTex Help", description="Common Moderation Commands",
                              footer="Execute `help <command>` to get more information about one command.",
                              colour=0xa6f7ff)
            helpEmbed.add_field(name="kick", value="Kicks a user.", inline=False)
            helpEmbed.add_field(name="ban", value="Bans a user.", inline=False)
            helpEmbed.add_field(name="unban", value="Unbans a user.", inline=False)
            helpEmbed.add_field(name="mute", value="Mutes a user.", inline=False)
            helpEmbed.add_field(name="unmute", value="Unmutes a user.", inline=False)
            helpEmbed.add_field(name="tempmute", value="Temporarily mutes a user.", inline=False)
            await ctx.send(embed=helpEmbed)
        # Per-command detail embeds follow; one branch per documented command.
        elif command == "kick":
            helpKickEmbed = Embed(title="HarTex Help: Kick", description="Kick: Usage", colour=0xa6f7ff)
            helpKickEmbed.add_field(name="Usage", value="kick <member: **discord.Member**> <reason: **optional**>",
                                    inline=False)
            helpKickEmbed.add_field(name="Description", value="Kicks a member.", inline=False)
            await ctx.send(embed=helpKickEmbed)
        elif command == "ban":
            helpBanEmbed = Embed(title="HarTex Help: Ban", description="Ban: Usage", colour=0xa6f7ff)
            helpBanEmbed.add_field(name="Usage", value="ban <member: **discord.Member**> <reason: **optional**>",
                                   inline=False)
            helpBanEmbed.add_field(name="Description", value="Bans a member.", inline=False)
            await ctx.send(embed=helpBanEmbed)
        elif command == "unban":
            helpUnbanEmbed = Embed(title="HarTex Help: Unban", description="Unban: Usage", colour=0xa6f7ff)
            helpUnbanEmbed.add_field(name="Usage", value="unban <member: **discord.Member**> <reason: **optional**>",
                                     inline=False)
            helpUnbanEmbed.add_field(name="Description", value="Unbans a member.", inline=False)
            await ctx.send(embed=helpUnbanEmbed)
        elif command == "mute":
            helpMuteEmbed = Embed(title="HarTex Help: Mute", description="Mute: Usage", colour=0xa6f7ff)
            helpMuteEmbed.add_field(name="Usage", value="mute <member: **discord.Member**> <reason: **optional**>",
                                    inline=False)
            helpMuteEmbed.add_field(name="Description", value="Mutes a member.", inline=False)
            await ctx.send(embed=helpMuteEmbed)
        elif command == "unmute":
            helpUnmuteEmbed = Embed(title="HarTex Help: Unmute", description="Unmute: Usage", colour=0xa6f7ff)
            helpUnmuteEmbed.add_field(name="Usage", value="unmute <member: **discord.Member**> <reason: **optional**>",
                                      inline=False)
            helpUnmuteEmbed.add_field(name="Description", value="Unmutes a member.", inline=False)
            await ctx.send(embed=helpUnmuteEmbed)
        elif command == "tempmute":
            helpTempmuteEmbed = Embed(title="HarTex Help: Tempmute", description="Tempmute: Usage", colour=0xa6f7ff)
            helpTempmuteEmbed.add_field(name="Usage",
                                        value="tempmute <member: **discord.Member**> <time: **string**> <reason: **optional**>",
                                        inline=False)
            helpTempmuteEmbed.add_field(name="Description", value="Temporarily mutes a member.", inline=False)
            await ctx.send(embed=helpTempmuteEmbed)

    @commands.command()
    async def staff(self, ctx):
        # Embed crediting the bot's staff and support team.
        staff_embed = discord.Embed(colour=0xa6f7ff)
        staff_embed.set_author(name="HarTex Staff",
                               icon_url="https://media.discordapp.net/attachments/582782144099778560/680318768907419668/your_profile_picture.jpg?width=450&height=450")
        staff_embed.description = "Without the people above, this bot cannot be this great!"
        staff_embed.add_field(name="Global Administrator & Developer", value="Harry.#2450", inline=False)
        staff_embed.add_field(name="Lead Developer", value="Harry.#2450", inline=False)
        staff_embed.add_field(name="Development Team", value="OfficialAz3#0762", inline=False)
        staff_embed.add_field(name="HarTex Support Team", value="Harry.#2450\nOfficialAz3#0762", inline=False)
        await ctx.send(embed=staff_embed)
def setup(hartex):
    # discord.py extension entry point: registers the Global cog on the bot.
    hartex.add_cog(Global(hartex))
| HTG-YT/hartex-discord.py | HarTex/cmds/global.py | global.py | py | 5,665 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "discord.ext.commands.command",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.has_permissions",
"line_number": 11,
"usage_type": "call"
},
{... |
40556512110 | import os
import time
import requests
import telegram
import logging
from dotenv import load_dotenv, find_dotenv
logger = logging.getLogger('__name__')
class TelegramBotHandler(logging.Handler):
    """A logging handler that forwards every record to a Telegram chat.

    Each record is rendered with the handler's formatter and delivered as a
    message through the supplied bot instance.
    """

    def __init__(self, log_bot, chat_id):
        super().__init__()
        # Bot used for delivery and the destination chat identifier.
        self.log_bot = log_bot
        self.chat_id = chat_id

    def emit(self, record):
        """Format *record* and push it to the configured chat."""
        self.log_bot.send_message(text=self.format(record), chat_id=self.chat_id)
def main():
    """Poll the dvmn.org long-polling API for homework review results and
    forward each new result to a Telegram chat; status and errors are also
    reported to the chat via a second "log" bot.

    Required environment variables (loaded via dotenv): TOKEN (dvmn API),
    BOT_TOKEN, LOG_BOT_TOKEN, CHAT_ID.
    """
    load_dotenv(find_dotenv())
    token = os.environ['TOKEN']
    bot_token = os.environ['BOT_TOKEN']
    log_bot_token = os.environ['LOG_BOT_TOKEN']
    chat_id = os.environ['CHAT_ID']
    bot = telegram.Bot(bot_token)
    log_bot = telegram.Bot(log_bot_token)
    logger.setLevel(logging.INFO)
    logger.addHandler(TelegramBotHandler(log_bot, chat_id))
    try:
        url = 'https://dvmn.org/api/long_polling/'
        header = {
            'Authorization': f'Token {token}'
        }
        # Timestamp of the last seen attempt; echoed back so the API only
        # returns newer attempts.
        timestamp = ''
        logger.info('Bot started')
        while True:
            try:
                params = {'timestamp': timestamp}
                response = requests.get(url, headers=header, params=params, timeout=120)
                response.raise_for_status()
            except requests.exceptions.ReadTimeout:
                # Long poll expired with no news: simply poll again.
                logger.info('Надо ж дать')
                continue
            except requests.ConnectionError as err:
                # Network hiccup: back off for three minutes, then retry.
                time.sleep(180)
                logger.error(err)
                continue
            response_attempts = response.json()
            # The API returns one of two timestamp keys depending on whether
            # new attempts arrived; carry it into the next request.
            if 'timestamp_to_request' in response_attempts:
                timestamp = response_attempts['timestamp_to_request']
            else:
                timestamp = response_attempts['last_attempt_timestamp']
            new_attempts = response_attempts['new_attempts']
            for attempt in new_attempts:
                lesson_title = attempt['lesson_title']
                if attempt['is_negative']:
                    add_text = 'К сожалению, в работе нашлись ошибки.'
                else:
                    add_text = 'Преподавателю все понравилось, можно приступать к следующему уроку!'
                lesson_url = attempt['lesson_url']
                bot.send_message(text=f'У Вас проверили работу «{lesson_title}»\n\n{add_text}\n\n {lesson_url}', chat_id=chat_id)
    except Exception as err:
        # Last-resort guard: report the fatal error via the log bot and exit.
        logger.fatal(err)
# Start the polling loop when executed as a script.
if __name__ == '__main__':
    main()
| Kilsik/Check_Devman_lessons | main.py | main.py | py | 2,618 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.Handler",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "dotenv.find... |
20475078540 | import pytz
import json
import os
import mimetypes
import requests
import tempfile
from django import forms
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from django.core.files.images import ImageFile, get_image_dimensions
from django.conf import settings
from allauth.socialaccount.models import SocialAccount
from .models import User, ScheduledPost
class SplitDateTimeWidget(forms.SplitDateTimeWidget):
    """Django's split date/time widget rendered as native HTML5 controls.

    The first sub-widget becomes an <input type="date"> and the second an
    <input type="time">, so browsers show their built-in pickers.
    """

    def __init__(self, *args, **kwargs):
        super(SplitDateTimeWidget, self).__init__(*args, **kwargs)
        for widget, html_type in zip(self.widgets, ("date", "time")):
            widget.input_type = html_type
class ScheduledPostAddForm(forms.ModelForm):
    """Form for scheduling a social-media post.

    Only the services (Facebook/Twitter) for which the user has a connected
    allauth SocialAccount are offered; media may be supplied either as an
    upload or as a URL, but not both.
    """
    scheduled_datetime = forms.SplitDateTimeField(
        widget=SplitDateTimeWidget
    )
    # Hidden field; presumably filled client-side with the browser's
    # timezone name -- confirm against the template's JS.
    scheduled_tz = forms.CharField(widget=forms.HiddenInput,
                                   initial="UTC", required=False)
    status = forms.CharField(
        widget=forms.Textarea(attrs={"rows": None, "cols": None})
    )
    attached_media = forms.ImageField(required=False)
    media_url = forms.URLField(required=False)

    def __init__(self, *args, **kwargs):
        user = kwargs.pop("user")
        super(ScheduledPostAddForm, self).__init__(*args, **kwargs)
        # Default the schedule to one hour from now.
        one_hour_hence = timezone.now() + timezone.timedelta(hours=1)
        self.fields["scheduled_datetime"].initial = one_hour_hence
        # Offer only services the user has actually connected.
        self.fields["service"].choices = []
        if SocialAccount.objects.filter(user=user, provider="facebook").count():
            self.fields["service"].choices.append(
                ("facebook", "Facebook"),
            )
        if SocialAccount.objects.filter(user=user, provider="twitter").count():
            self.fields["service"].choices.append(
                ("twitter", "Twitter"),
            )

    class Meta:
        model = ScheduledPost
        fields = ("scheduled_datetime", "service", "status", "attached_media")

    def clean_scheduled_datetime(self):
        # Reject schedules in the past.
        scheduled_datetime = self.cleaned_data["scheduled_datetime"]
        if scheduled_datetime < timezone.now():
            raise forms.ValidationError(_("Time cannot be in the past"))
        return scheduled_datetime

    def clean(self):
        data = self.cleaned_data
        if data.get("scheduled_datetime"):
            # Re-interpret the user's naive input in the reported timezone,
            # then convert to the server's local time for storage.
            sched_dt = data["scheduled_datetime"]
            sched_tz = timezone.pytz.timezone(data.get("scheduled_tz"))
            sched_dt = sched_tz.localize(sched_dt.replace(tzinfo=None))
            data["scheduled_datetime"] = timezone.localtime(sched_dt)
        # Upload and URL are mutually exclusive ways to provide media.
        if data.get("attached_media") and data.get("media_url"):
            raise forms.ValidationError(_("Only one of media URL or "
                                          "attached media may be provided"))
        if data.get("media_url"):
            # Download the remote media into a temp file and verify it is a
            # real image before treating it as the attachment.
            response = requests.get(data["media_url"])
            if not response.ok:
                raise forms.ValidationError(_("An error occurred while "
                                              "downloading the media from the URL"))
            ext = mimetypes.guess_extension(response.headers['content-type'])
            ff = tempfile.NamedTemporaryFile(suffix=ext)
            ff.write(response.content)
            img_file = ImageFile(ff, name=ff.name)
            height, width = get_image_dimensions(img_file)
            if height is None or width is None:
                ff.close()
                raise forms.ValidationError(_("Invalid image"))
            data["attached_media"] = img_file
        return data
| theju/smp | scheduler/forms.py | forms.py | py | 3,604 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "django.forms.SplitDateTimeWidget",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 27,
"usage_type": "attribute"
},
{
"ap... |
74328989543 | #!/usr/bin/env python3
import csv
from openpyxl import Workbook
from openpyxl.utils import get_column_letter
import requests
import sys
# SECURITY NOTE(review): the bearer token below is hard-coded in source
# control; it should be loaded from an environment variable -- confirm it
# is not a live key.
HEADERS = {'Authorization' : 'Bearer key1agtUnabRLb2LS', 'accept' : 'text/plain'}
BASE_URL = 'https://api.airtable.com/v0/appj3UWymNh6FgtGR/'
VIEW = 'view=Grid%20view'
# Each *_TABLE_MAP maps an output field name -> the Airtable column name.
# Values related to the place table
PLACE_TABLE_NAME = 'Help%20Services'
PLACE_TABLE_VAR = 'placesTableCached'
PLACE_TABLE_MAP = {
    'name' : 'Name',
    'name_es' : 'Name-ES',
    'catSubcatId' : 'CatSubcat',
    'city' : 'City',
    'category' : 'Category',
    'subcategory' : 'Subcategory',
    'phone' : 'Phone Number',
    'address' : 'Physical Address',
    'latitude' : 'Latitude',
    'longitude' : 'Longitude',
    'url' : 'Web address',
    'email' : 'Email Address',
    'hours' : 'Hours of operation',
    'hours_es' : 'Hours of operation-ES',
    'description' : 'Description',
    'description_es' : 'Description-ES',
    'wheelchair' : 'Wheelchair access (y)',
    'languageHelp' : 'Language Help (y)',
    'schedule' : 'schedule',
}
# Values related to the Category table
CATEGORY_TABLE_NAME = 'Categories'
CATEGORY_TABLE_VAR = 'categoryTableCached'
CATEGORY_TABLE_MAP = {
    'name' : 'Name',
    'name_es' : 'Name-ES',
    'subcategories' : 'Subcategories',
}
# Values related to the Subcategory table
SUBCATEGORY_TABLE_NAME = 'Subcategories'
SUBCATEGORY_TABLE_VAR = 'subcategoryTableCached'
SUBCATEGORY_TABLE_MAP = {
    'categoryId' : 'Category',
    'name' : 'Name',
    'name_es' : 'Name-ES',
}
# Values related to the CatSubcat table
CATSUBCAT_TABLE_NAME = 'CatSubcats'
CATSUBCAT_TABLE_VAR = 'catSubcatTableCached'
# Cat Subcat handled specially, so doesn't use table map.
CATSUBCAT_TABLE_MAP = {}
# Values related to the Cities table
CITY_TABLE_NAME = 'Cities'
CITY_TABLE_VAR = 'cityTableCached'
CITY_TABLE_MAP = {
    'name' : 'Name',
}
ALERT_TABLE_NAME = 'Alerts'
ALERT_TABLE_VAR = 'alertTableCached'
ALERT_TABLE_MAP = {
    'title' : 'Title',
    'displayDate' : 'Display Date',
    'startDate' : 'StartDate',
    'endDate' : 'EndDate',
    'note' : 'Notes',
}
SCHEDULE_TABLE_NAME = 'schedule'
SCHEDULE_TABLE_VAR = 'scheduleTableCached'
SCHEDULE_TABLE_MAP = {
    'byDay' : 'byday',
    'opensAt' : 'opens_at',
    'closesAt' : 'closes_at',
    'byMonthDay' : 'bymonthday',
    'validFrom' : 'valid_from',
    'validTo' : 'valid_to',
}
# Make a record in our desired output format
def make_record(record_in, key_pairs):
    """Convert a raw Airtable record to a flat output dict.

    :param record_in: raw record with an 'id' and a 'fields' sub-dict.
    :param key_pairs: maps output key -> Airtable column name.
    :return: dict with 'id' plus one entry per key of ``key_pairs``;
        columns missing from the record become ''.
    """
    fields = record_in['fields']
    record_out = {'id': record_in['id']}
    for new_key, old_key in key_pairs.items():
        # .get() replaces the membership-test-plus-lookup of the original.
        record_out[new_key] = fields.get(old_key, '')
    return record_out
# Make a record in our desired output format -- special handling for
# special CatSubcat features
def make_record_catsubcat(record_in):
    """Flatten a raw CatSubcat record.

    Subcategory-related fields default to '' when the record has no
    'Subcategory' entry; 'places' always starts empty.
    """
    fields = record_in['fields']
    subcat_id = ''
    subcat_name = ''
    subcat_name_es = ''
    if 'Subcategory' in fields:
        subcat_id = fields['Subcategory'][0]
        subcat_name = fields['SubcategoryString'][0]
        subcat_name_es = fields['Subcategory-ES'][0]
    return {
        'catSubcatId': record_in['id'],
        'catSubcatName': fields['Name'],
        'categoryId': fields['Category'][0],
        'subcategoryId': subcat_id,
        'name': subcat_name,
        'name_es': subcat_name_es,
        'places': [],
    }
# Use map to convert a table to the form we want. Note the special code for the
# CatSubcat table because it is structured a little different from the others.
def table_map(table_name, table_raw, key_pairs):
    """Convert every raw record in *table_raw* to the output format.

    The CatSubcat table gets its own converter because its raw layout
    differs from the other tables; ``key_pairs`` is ignored for it.
    """
    # Redundant lambda wrappers around the converters replaced by
    # direct list comprehensions (same result, one less indirection).
    if table_name == CATSUBCAT_TABLE_NAME:
        return [make_record_catsubcat(record) for record in table_raw]
    return [make_record(record, key_pairs) for record in table_raw]
# Air table only allows pages of up to 100 records at a time. This gets a page
def get_page(url):
    """Fetch one page of records; return (records, offset-or-None).

    Airtable caps responses at 100 records and includes an 'offset'
    key only when more pages remain.
    """
    payload = requests.get(url, headers=HEADERS).json()
    return payload['records'], payload.get('offset')
def get_table(table_name, mapping):
    """Download an entire Airtable table (following pagination) and map it."""
    table_url = BASE_URL + table_name
    # First page, then keep requesting while a continuation offset comes back.
    rows, offset = get_page(table_url + '?' + VIEW)
    while offset:
        page, offset = get_page(table_url + '?offset=' + offset + '&' + VIEW)
        rows.extend(page)
    # Map it into the form we need.
    return table_map(table_name, rows, mapping)
def do_table(table_name, mapping, var_name, f):
    """Download *table_name* and write it into JS file *f* as a const.

    Bug fix: the table was previously interpolated with Python's repr(),
    which emits tokens such as True/False/None that are invalid
    JavaScript.  The data is now serialized with json.dumps, which is
    valid JS literal syntax.
    """
    import json  # local import: only needed here
    table = get_table(table_name, mapping)
    # Write it into the javascript file.
    print('const', var_name, '=', json.dumps(table), ';', file=f)
def do_mailmerge(dir, place_table, category_table, catsubcat_table, language_str):
    """Build a flattened 'mail merge' table and save it as CSV and Excel.

    :param dir: output directory, must end with '/'.
    :param place_table: mapped Help Services records.
    :param category_table: mapped Categories records.
    :param catsubcat_table: mapped CatSubcats records.
    :param language_str: '' for English or '_es' for Spanish variants.
    """
    mailmerge_table = []
    for record in place_table:
        for cat_subcat_id in record['catSubcatId']:
            # Wrangle the cats and subcats from the catSubcat table.
            # Assumes each id matches exactly one record.
            catsubcat_record = [c for c in catsubcat_table
                                if c['catSubcatId'] == cat_subcat_id]
            category_id = catsubcat_record[0]['categoryId']
            category_record = [c for c in category_table
                               if c['id'] == category_id]
            mailmerge_table.append({
                'Category' : category_record[0]['name'+language_str],
                'Subcategory' : catsubcat_record[0]['name'+language_str],
                'Service Name' : record['name'],
                'Phone Number' : record['phone'],
                'Physical Address' : record['address'],
                'Hours of operation' : record['hours'],
                'Description' : record['description'+language_str],
                'Wheelchair access (y)' : record['wheelchair'],
                'Language Help (y)' : record['languageHelp'],
                'Web address' : record['url'],
                'Email Address' : record['email'],
            })
    mailmerge_table.sort(key=lambda i: (i['Category'], i['Subcategory'],
                                        i['Service Name']))
    # Save as csv file
    csv_columns = list(mailmerge_table[0].keys())
    filename = dir + 'final_book' + language_str
    csv_file = filename + '.csv'
    xls_file = filename + '.xls'
    try:
        # newline='' is the documented csv-module requirement; without it
        # blank rows appear on Windows.
        with open(csv_file, 'w', newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
            writer.writeheader()
            for data in mailmerge_table:
                writer.writerow(data)
    except IOError:
        # Bug fix: the original evaluated the bare name `exit` (a no-op)
        # and then fell through to the Excel conversion; abort instead.
        print("I/O error")
        return
    # Convert the csv file to an Excel file.
    # NOTE(review): openpyxl writes xlsx-format data; the '.xls' extension
    # is kept for backward compatibility but is arguably misnamed.
    wb = Workbook()
    ws = wb.worksheets[0]
    ws.title = "Sheet1"
    with open(csv_file, 'r', newline='') as f:
        for row_index, row in enumerate(csv.reader(f)):
            for column_index, cell in enumerate(row):
                column_letter = get_column_letter(column_index + 1)
                ws[column_letter + str(row_index + 1)] = cell
    wb.save(filename=xls_file)
# --- Script entry point -------------------------------------------------
# Optional first CLI argument: output directory (defaults to './').
dir = './'
if len(sys.argv) > 1:
    dir = sys.argv[1]
    if dir[-1] != '/':
        dir += '/'
# Process each table
# NOTE(review): the JS file handle is never closed explicitly; the script
# relies on interpreter exit to flush it.
f = open(dir+'cachedInlineTables.js', 'w')
do_table(PLACE_TABLE_NAME, PLACE_TABLE_MAP, PLACE_TABLE_VAR, f)
do_table(CATEGORY_TABLE_NAME, CATEGORY_TABLE_MAP, CATEGORY_TABLE_VAR, f)
do_table(SUBCATEGORY_TABLE_NAME, SUBCATEGORY_TABLE_MAP, SUBCATEGORY_TABLE_VAR, f)
do_table(CATSUBCAT_TABLE_NAME, CATSUBCAT_TABLE_MAP, CATSUBCAT_TABLE_VAR, f)
do_table(CITY_TABLE_NAME, CITY_TABLE_MAP, CITY_TABLE_VAR, f)
do_table(ALERT_TABLE_NAME, ALERT_TABLE_MAP, ALERT_TABLE_VAR, f)
do_table(SCHEDULE_TABLE_NAME, SCHEDULE_TABLE_MAP, SCHEDULE_TABLE_VAR, f)
# These three tables are fetched a second time (extra API round-trips) for
# the mail-merge exports: English, then Spanish.
place_table = get_table(PLACE_TABLE_NAME, PLACE_TABLE_MAP)
category_table = get_table(CATEGORY_TABLE_NAME, CATEGORY_TABLE_MAP)
catsubcat_table = get_table(CATSUBCAT_TABLE_NAME, CATSUBCAT_TABLE_MAP)
do_mailmerge(dir, place_table, category_table, catsubcat_table, '')
do_mailmerge(dir, place_table, category_table, catsubcat_table, '_es')
{
"api_name": "requests.get",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "csv.DictWriter",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "openpyxl.Workbook",
"li... |
37166463882 | import math
import matplotlib.pyplot as plt
def integral(n):
    """Return [I(0), ..., I(n)] via the forward recurrence
    I(k) = 1 - k * I(k-1), with I(0) = 1 - e**-1.

    (The recurrence is numerically unstable for large k.)
    """
    values = [1 - math.exp(-1)]
    if n < 1:
        return values
    for k in range(1, n + 1):
        values.append(1 - k * values[k - 1])
    return values
# Plot I(k) for k = 0..20; the growing oscillation illustrates the
# numerical instability of the forward recurrence.
n = 20
x = range(0,n+1)
y = integral(n)
plt.plot(x, y, 'r')
plt.xlabel('k')
plt.ylabel('I(k)')
plt.show()
{
"api_name": "math.exp",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xl... |
26819728584 |
# import math
import requests
import re
def get_url():
    """Return the CyberSyndrome proxy-search URL.

    The pieces are concatenated from fragments (as in the original,
    presumably to dodge naive string scanners); the result is a plain
    http URL with a fixed query string.
    """
    host = '.'.join(('www', 'cyber' + 'syndrome', 'net'))
    query = '&'.join(('q=JP', 'a=A', 'f=d', 's=new', 'n=100'))
    return 'http' + ':' + '//' + host + '/' + 'search.cgi' + '?' + query
def read_page():
    """Download the proxy-list page and return the response body as text."""
    url = get_url()
    # Spoof a desktop Chrome User-Agent -- presumably the site rejects
    # default client UAs (TODO confirm).
    ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
    headers = {
        'User-Agent': ua,
    }
    session = requests.Session()
    session.headers.update(headers)
    res = session.get(url)
    return res.text
def parse_as(html):
    """Extract the obfuscated ``as`` array from the page.

    Returns a list of digit strings, or '' when the pattern is absent.
    """
    match = re.search(r'as=\[([\d,]+)\];', html)
    if not match:
        return ''
    return match.group(1).split(',')
def parse_ps(html):
    """Extract the ``ps`` (port) array from the page.

    Returns a list of digit strings, or '' when the pattern is absent.
    """
    match = re.search(r'var ps=\[([\d,]+)\];', html)
    if not match:
        return ''
    return match.group(1).split(',')
def parse_n(html, ps):
    """Evaluate the page's ``n`` expression after inlining ps[i] values.

    Returns the integer n, or '' when the pattern is absent.
    """
    match = re.search(r'var n=(\(.+?\)\%\d+);', html)
    if not match:
        return ''
    expr = match.group(1)
    # Substitute every ps[i] reference with its concrete (string) value.
    for idx in re.findall(r'ps\[(\d+)\]', expr):
        expr = expr.replace('ps[%s]' % idx, ps[int(idx)])
    # SECURITY: eval of page-derived text is kept to preserve behavior,
    # but a malicious page could inject arbitrary code here.
    return eval(expr)
def decode_ip_addrs(var_as, var_ps, var_n):
    """Rebuild 'a.b.c.d:port' strings from the obfuscated arrays.

    The page stores the octets rotated by *var_n*: the real octet stream
    is ``var_as[var_n:] + var_as[:var_n]``.  Octets are grouped in fours,
    and ``var_ps[i]`` supplies the port of the i-th address.  A trailing
    partial group is discarded, matching the original behavior.

    (The large block of commented-out scratch code from the original has
    been removed.)
    """
    rotated = var_as[var_n:] + var_as[:var_n]
    addrs = []
    for start in range(0, len(rotated), 4):
        quad = rotated[start:start + 4]
        if len(quad) < 4:
            break
        addrs.append('.'.join(quad) + ':' + var_ps[len(addrs)])
    return addrs
def get_proxies():
    """Scrape the page and return the decoded 'ip:port' proxy strings."""
    html = read_page()
    # print(html)
    # The page obfuscates addresses into two JS arrays plus a rotation n.
    var_as = parse_as(html)
    var_ps = parse_ps(html)
    var_n = parse_n(html, var_ps)
    ips = decode_ip_addrs(var_as, var_ps, var_n)
    return ips
# Simple manual test: print the scraped proxy list.
if __name__ == '__main__':
    proxies = get_proxies()
    print(proxies)
| otsutomesan/proxy80 | proxy80/proxy80.py | proxy80.py | py | 2,426 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.Session",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 4... |
74286578664 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import math
import numpy as np
import pytest
from .. import ChainedNeqSys, ConditionalNeqSys, NeqSys
# Optional dependency: tests exercising the nleq2 solver are skipped when
# the pynleq2 bindings are not installed.
try:
    import pynleq2  # noqa
except ImportError:
    HAVE_PYNLEQ2 = False
else:
    HAVE_PYNLEQ2 = True
def f(x, params):
    """Residuals of the two-equation test system from the SciPy docs.

    docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html
    """
    exponent = params[0]
    diff = x[0] - x[1]
    return [
        x[0] + diff ** exponent / 2 - 1,
        (-diff) ** exponent / 2 + x[1],
    ]
def j(x, params):
    """Analytic Jacobian of :func:`f` (same parameterization)."""
    e = params[0]
    d01 = e / 2 * (x[0] - x[1]) ** (e - 1)
    d10 = e / 2 * (x[1] - x[0]) ** (e - 1)
    return [
        [1 + d01, -d01],
        [-d10, 1 + d10],
    ]
def _test_fail(solver, **kwargs):
    # x**2 + 1 = 0 has no real root, so the solver must report failure
    # while still returning a best-effort result vector.
    def _f(x, p):
        return [p[0] + x[0] ** 2]
    def _j(x, p):
        return [[2 * x[0]]]
    ns = NeqSys(1, 1, _f, jac=_j)
    x, res = ns.solve([1], [1], solver=solver, **kwargs)
    assert len(x) == 1
    assert abs(x[0]) < 1e-8
    assert not res["success"]
def test_fail():
    _test_fail("scipy")
def test_neqsys_rms():
    # Root-mean-square residual checked against a direct NumPy reference.
    ns = NeqSys(2, 2, f)
    x = [[1, 0], [2, 1], [3, 2], [7, 4], [5, 13]]
    p = [3]
    rms = ns.rms(x, p)
    ref = [np.sqrt(np.sum(np.square(f(x[i], p))) / 2) for i in range(5)]
    assert np.allclose(rms, ref)
def _test_neqsys_params(solver, **kwargs):
    # Known root of the parameterized test system for params=[3].
    ns = NeqSys(2, 2, f, jac=j)
    x, sol = ns.solve([0, 0], [3], solver=solver, **kwargs)
    assert abs(x[0] - 0.8411639) < 2e-7
    assert abs(x[1] - 0.1588361) < 2e-7
def _test_neqsys_solve_series(solver):
    # Sweep parameter 0 over [2, 3]; each row of x is one root.
    ns = NeqSys(2, 2, f, jac=j)
    x, sol = ns.solve_series(solver, [0, 0], [0], var_data=[2, 3], var_idx=0)
    assert abs(x[0, 0] - 0.5) < 2e-7
    assert abs(x[0, 1] + 0.5) < 2e-7
    assert abs(x[1, 0] - 0.8411639) < 2e-7
    assert abs(x[1, 1] - 0.1588361) < 2e-7
def test_neqsys_params_scipy():
    _test_neqsys_params("scipy")
@pytest.mark.skipif(not HAVE_PYNLEQ2, reason="pynleq2 not installed on system.")
def test_neqsys_params_nleq2():
    _test_neqsys_params("nleq2")
def _test_neqsys_no_params(solver, **kwargs):
    # Same system with params baked in via closures (no params argument).
    ns = NeqSys(2, 2, lambda x: f(x, [3]), jac=lambda x: j(x, [3]))
    x, sol = ns.solve([0, 0], solver=solver, **kwargs)
    assert abs(x[0] - 0.8411639) < 2e-7
    assert abs(x[1] - 0.1588361) < 2e-7
def test_neqsys_no_params_scipy():
    _test_neqsys_no_params("scipy")
@pytest.mark.skipif(not HAVE_PYNLEQ2, reason="pynleq2 not installed on system.")
def test_neqsys_no_params_nleq2():
    _test_neqsys_no_params("nleq2")
def test_ConditionalNeqSys1():
    # Piecewise system: sin(p0*x) applies for x <= 0 and x*(p1 - x) for
    # x >= 0; the factory swaps formulations when the condition flips.
    from math import pi, sin
    def f_a(x, p):
        return [sin(p[0] * x[0])]  # when x <= 0
    def f_b(x, p):
        return [x[0] * (p[1] - x[0])]  # when x >= 0
    def factory(conds):
        return NeqSys(1, 1, f_b) if conds[0] else NeqSys(1, 1, f_a)
    cneqsys = ConditionalNeqSys(
        [(lambda x, p: x[0] > 0, lambda x, p: x[0] >= 0)], factory
    )
    # Which root is reached depends on the starting guess (0, -1, 3, 3).
    x, sol = cneqsys.solve([0], [pi, 3], solver="scipy")
    assert sol["success"]
    assert abs(x[0]) < 1e-13
    x, sol = cneqsys.solve([-1.4], [pi, 3], solver="scipy")
    assert sol["success"]
    assert abs(x[0] + 1) < 1e-13
    x, sol = cneqsys.solve([2], [pi, 3], solver="scipy")
    assert sol["success"]
    assert abs(x[0] - 3) < 1e-13
    x, sol = cneqsys.solve([7], [pi, 3], solver="scipy")
    assert sol["success"]
    assert abs(x[0] - 3) < 1e-13
def _check_NaCl(cneqsys, guesses, cases=-1, **kwargs):
    # Sodium chloride (NaCl) precipitation
    # Each pair is (initial concentrations, expected equilibrium) for
    # x = [Na+, Cl-, NaCl(s)] with solubility product Ksp = 4 appended
    # as the final parameter below.
    _init_final = [
        ([1, 1, 1], [2, 2, 0]),
        ([1, 1, 0], [1, 1, 0]),
        ([3, 3, 3], [2, 2, 4]),
        ([2, 2, 0], [2, 2, 0]),
        ([2 + 1e-8, 2 + 1e-8, 0], [2, 2, 1e-8]),
        ([3, 3, 0], [2, 2, 1]),
        ([0, 0, 3], [2, 2, 1]),
        ([0, 0, 2], [2, 2, 0]),
        ([2, 2, 2], [2, 2, 2]),
    ]
    info_dicts = []
    for init, final in _init_final[:cases]:
        print(init)
        for guess in guesses:
            print(guess)
            # A guess of None means "start from the initial concentrations".
            if guess is None:
                guess = init
            x, info_dict = cneqsys.solve(guess, init + [4], solver="scipy", **kwargs)
            assert info_dict["success"] and np.allclose(x, final)
            info_dicts.append(info_dict)
    return info_dicts
def _factory_lin(conds):
    # This is an example of NaCl precipitation
    # x = Na+, Cl-, NaCl(s)
    # p = [Na+]0, [Cl-]0, [NaCl(s)]0, Ksp
    # f[0] = x[0] + x[2] - p[0] - p[2]
    # f[1] = x[1] + x[2] - p[1] - p[2]
    # switch to precipitation: x[0]*x[1] > p[3]
    # keep precipitation if: x[2] > 0
    #
    # If we have a precipitate
    # f[2] = x[0]*x[1] - p[3]
    # otherwise:
    # f[2] = x[2]
    precip = conds[0]
    def cb(x, p):
        # Mass balances are shared; only the third equation depends on
        # whether the precipitate phase is present.
        f = [None] * 3
        f[0] = x[0] + x[2] - p[0] - p[2]
        f[1] = x[1] + x[2] - p[1] - p[2]
        if precip:
            f[2] = x[0] * x[1] - p[3]
        else:
            f[2] = x[2]
        return f
    return NeqSys(3, 3, cb)
def _factory_log(small):
    # This is equivalent to _factory_lin
    # but this time variable transformations
    # are performed
    def _inner_factory(conds):
        precip = conds[0]
        def pre_processor(x, p):
            # Work in log space; exp(small) keeps log() away from zero.
            return np.log(np.asarray(x) + math.exp(small)), p
        def post_processor(x, p):
            return np.exp(x), p
        def fun(x, p):
            f = [None] * 3
            f[0] = math.exp(x[0]) + math.exp(x[2]) - p[0] - p[2]
            f[1] = math.exp(x[1]) + math.exp(x[2]) - p[1] - p[2]
            if precip:
                f[2] = x[0] + x[1] - math.log(p[3])
            else:
                f[2] = x[2] - small
            return f
        def jac(x, p):
            # Analytic Jacobian of `fun` in the log-transformed variables.
            jout = np.empty((3, 3))
            jout[0, 0] = math.exp(x[0])
            jout[0, 1] = 0
            jout[0, 2] = math.exp(x[2])
            jout[1, 0] = 0
            jout[1, 1] = math.exp(x[1])
            jout[1, 2] = math.exp(x[2])
            if precip:
                jout[2, 0] = 1
                jout[2, 1] = 1
                jout[2, 2] = 0
            else:
                jout[2, 0] = 0
                jout[2, 1] = 0
                jout[2, 2] = 1
            return jout
        return NeqSys(
            3,
            3,
            fun,
            jac,
            pre_processors=[pre_processor],
            post_processors=[post_processor],
        )
    return _inner_factory
def _get_cneqsys2():
    # Linear-space conditional system: precipitate forms when the ion
    # product exceeds Ksp, and persists while x[2] > 0.
    return ConditionalNeqSys(
        [(lambda x, p: x[0] * x[1] > p[3], lambda x, p: x[2] > 0)], _factory_lin
    )
def test_ConditionalNeqSys2():
    _check_NaCl(_get_cneqsys2(), [(1, 1, 1), (1, 1, 0), (2, 2, 0), (1, 1, 3)])
def _get_cneqsys3(small):
    # Log-space variant; exp(small) stands in for "zero" precipitate.
    return ConditionalNeqSys(
        [(lambda x, p: x[0] * x[1] > p[3], lambda x, p: x[2] > math.exp(small))],
        _factory_log(small),
    )
def test_ConditionalNeqSys3():
    _check_NaCl(_get_cneqsys3(-60), [None], 4, method="lm")
def test_version():
    from pyneqsys import __version__
    assert int(__version__.split(".")[0]) >= 0
def test_solve_series():
    # Trivial x = p system swept over four parameter values.
    neqsys = NeqSys(1, 1, lambda x, p: [x[0] - p[0]])
    xout, sols = neqsys.solve_series([0], [0], [0, 1, 2, 3], 0, solver="scipy")
    assert np.allclose(xout[:, 0], [0, 1, 2, 3])
def test_ChainedNeqSys():
    # Solve in log space first, then refine in linear space; both stages
    # must report success in the chained info dict.
    neqsys_log = _get_cneqsys3(-60)
    neqsys_lin = _get_cneqsys2()
    chained = ChainedNeqSys([neqsys_log, neqsys_lin])
    info_dicts = _check_NaCl(chained, [None], 2, method="lm")
    for nfo in info_dicts:
        assert (
            nfo["intermediate_info"][0]["success"]
            and nfo["intermediate_info"][1]["success"]
        )
# Reference root of Powell's test problem (the pair, in sorted order).
_powell_ref = (0.0001477105829534399, 6.769995622556115071)
def _powell(x, params, backend=math):
A, exp = params[0], backend.exp
return A * x[0] * x[1] - 1, exp(-x[0]) + exp(-x[1]) - (1 + A ** -1)
def _test_powell(sys_solver_pairs, x0=(1, 1), par=(1000.0,)):
    # Chain the given solvers: each stage starts from the previous result.
    for sys, solver in sys_solver_pairs:
        x0, info = sys.solve(x0, par, solver=solver, tol=1e-12)
        assert info["success"]
    x = sorted(x0)
    assert abs(_powell_ref[0] - x[0]) < 2e-11
    assert abs(_powell_ref[1] - x[1]) < 6e-10
def test_chained_solvers():
    powell_numpy = NeqSys(2, 2, _powell)
    powell_mpmath = NeqSys(2, 2, _powell)
    _test_powell([(powell_numpy, None), (powell_mpmath, "mpmath")])
def test_x_by_name():
    # Initial guess supplied as a dict keyed by variable name.
    powell_sys = NeqSys(2, f=_powell, names=["u", "v"], x_by_name=True)
    _test_powell(zip([powell_sys] * 2, [None, "mpmath"]), x0={"u": 1, "v": 1})
| bjodah/pyneqsys | pyneqsys/tests/test_core.py | test_core.py | py | 8,592 | python | en | code | 38 | github-code | 36 | [
{
"api_name": "numpy.sqrt",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number":... |
8841422938 | from __future__ import print_function
import logging
import re
from db_endpoint import DBEndpoint,TCPFrameClient
from utils import blockify
from thread_executor import ThreadExecutor, ThreadExecutorError
from ida_ts import get_func_length, get_func_data, get_func_comment,\
set_func_comment, Functions, first_func_addr, GetFunctionName,\
is_func_chunked, make_name
class FCatalogClientError(Exception):
    """Base error type for the fcatalog IDA client."""
    pass
logger = logging.getLogger(__name__)
# Minimum function size (in bytes) to be considered when trying to find
# similars.
MIN_FUNC_LENGTH = 0x60
# Markers used to tag names and comments that come from the fcatalog server:
FCATALOG_FUNC_NAME_PREFIX = 'FCATALOG__'
FCATALOG_COMMENT_PREFIX = '%%%'
# The grade of similarity for each function is a number between 0 and this
# constant (Inclusive):
MAX_SIM_GRADE = 16
# Amount of similar functions to return in every inquiry for similars function
# for a specific function:
NUM_SIMILARS = 1
# Amount of functions to be sent together to remote server when looking for
# similars:
GET_SIMILARS_BATCH_SIZE = 20
#########################################################################
def is_func_fcatalog(func_addr):
    """
    Have we obtained the name for this function from fcatalog server?
    We know this by the name of the function.
    """
    logger.debug('is_func_fcatalog {}'.format(func_addr))
    # Names assigned by this client always carry the FCATALOG__ prefix.
    func_name = GetFunctionName(func_addr)
    return func_name.startswith(FCATALOG_FUNC_NAME_PREFIX)
def is_func_long_enough(func_addr):
    """
    Check if a given function is of suitable size to be commited.
    Functions shorter than MIN_FUNC_LENGTH bytes are rejected.
    """
    logger.debug('is_func_long_enough {}'.format(func_addr))
    func_length = get_func_length(func_addr)
    if func_length < MIN_FUNC_LENGTH:
        return False
    return True
###########################################################################
def strip_comment_fcatalog(comment):
    """
    Remove all fcatalog comments from a given comment.
    Lines starting with the fcatalog prefix are dropped; the rest are
    rejoined unchanged.
    """
    kept = [ln for ln in comment.splitlines()
            if not ln.startswith(FCATALOG_COMMENT_PREFIX)]
    return '\n'.join(kept)
def add_comment_fcatalog(comment, fcatalog_comment):
    """
    Add fcatalog comment to a function.
    Prefixed fcatalog lines come first, followed by the original comment.
    """
    prefixed = [FCATALOG_COMMENT_PREFIX + ' ' + ln
                for ln in fcatalog_comment.splitlines()]
    return '\n'.join(prefixed + comment.splitlines())
def make_fcatalog_name(func_name, sim_grade, func_addr):
    """
    Make an fcatalog function name using function name and sim_grade.
    Layout: FCATALOG__<grade:2>__<name>__<addr:8 hex>.
    """
    return '{}{:0>2}__{}__{:0>8X}'.format(
        FCATALOG_FUNC_NAME_PREFIX, sim_grade, func_name,
        func_addr & 0xffffffff)
###########################################################################
class FCatalogClient(object):
    """Client that syncs function names and comments between the local IDB
    and a remote fcatalog server (commit named funcs, fetch similars)."""
    def __init__(self, remote, db_name, exclude_pattern=None):
        # Keep remote address:
        self._remote = remote
        # Keep remote db name:
        self._db_name = db_name
        # A thread executor. Allows only one task to be run every time.
        self._te = ThreadExecutor()
        # A regexp pattern that identifies functions that are not named, and
        # should be ignored.
        self._exclude_pattern = exclude_pattern
        # A thread safe print function. I am not sure if this is required.
        # It is done to be on the safe side:
        self._print = print
    def _is_func_named(self, func_addr):
        """
        Check if a function was ever named by the user.
        """
        logger.debug('_is_func_named {}'.format(func_addr))
        func_name = GetFunctionName(func_addr)
        # Avoid functions like sub_409f498:
        if func_name.startswith('sub_'):
            return False
        # If exclude_pattern was provided, make sure that the function
        # name does not match it:
        if self._exclude_pattern is not None:
            mt = re.match(self._exclude_pattern, func_name)
            if mt is not None:
                return False
        # Avoid reindexing FCATALOG functions:
        if is_func_fcatalog(func_addr):
            return False
        return True
    def _is_func_commit_candidate(self, func_addr):
        """
        Is this function a candidate for committing?
        Must be non-chunked, user-named and long enough.
        """
        # Don't commit if chunked:
        if is_func_chunked(func_addr):
            return False
        if not self._is_func_named(func_addr):
            return False
        if not is_func_long_enough(func_addr):
            return False
        return True
    def _is_func_find_candidate(self, func_addr):
        """
        Is this function a candidate for finding from database (Finding similars
        for this function?)  Must be non-chunked, NOT yet named and long enough.
        """
        if is_func_chunked(func_addr):
            return False
        if self._is_func_named(func_addr):
            return False
        if not is_func_long_enough(func_addr):
            return False
        return True
    def _iter_func_find_candidates(self):
        """
        Iterate over all functions that are candidates for finding similars from
        the remote database.
        This function is IDA read thread safe.
        """
        for func_addr in Functions():
            if self._is_func_find_candidate(func_addr):
                yield func_addr
    def _commit_funcs_thread(self):
        """
        Commit all the named functions from this idb to the server.
        This is an IDA read thread safe function.
        """
        self._print('Commiting functions...')
        # Set up a connection to remote db:
        frame_endpoint = TCPFrameClient(self._remote)
        fdb = DBEndpoint(frame_endpoint, self._db_name)
        for func_addr in Functions():
            logger.debug('Iterating over func_addr: {}'.format(func_addr))
            if not self._is_func_commit_candidate(func_addr):
                continue
            func_name = GetFunctionName(func_addr)
            # Strip our own marks so fcatalog comments never round-trip
            # back to the server.
            func_comment = strip_comment_fcatalog(get_func_comment(func_addr))
            func_data = get_func_data(func_addr)
            # If we had problems reading the function data, we skip it.
            if func_data is None:
                self._print('!> Skipping {}'.format(func_name))
                continue
            fdb.add_function(func_name, func_comment, func_data)
            self._print(func_name)
        # Close db:
        fdb.close()
        self._print('Done commiting functions.')
    def commit_funcs(self):
        """
        Commit all functions from this IDB to the server.
        Runs asynchronously; refuses to start while another task runs.
        """
        try:
            t = self._te.execute(self._commit_funcs_thread)
        except ThreadExecutorError:
            print('Another operation is currently running. Please wait.')
    def _batch_similars(self, fdb, l_func_addr):
        """
        Given a list of function addresses, request similars for each of those
        functions. Then wait for all the responses, and return a list of tuples
        of the form: (func_addr,similars)
        This function is IDA read thread safe.
        """
        # Send requests for similars for every function in l_func_addr list:
        # (all requests are pipelined before any response is read)
        for func_addr in l_func_addr:
            func_data = get_func_data(func_addr)
            fdb.request_similars(func_data, 1)
        # Collect responses from remote server:
        lres = []
        for func_addr in l_func_addr:
            similars = fdb.response_similars()
            lres.append((func_addr, similars))
        return lres
    def _find_similars_thread(self, similarity_cut, batch_size):
        """
        For each unnamed function in this database find a similar functions
        from the fcatalog remote db, and rename appropriately.
        This thread is IDA write thread safe.
        """
        self._print('Finding similars...')
        # Set up a connection to remote db:
        frame_endpoint = TCPFrameClient(self._remote)
        fdb = DBEndpoint(frame_endpoint, self._db_name)
        # Iterate over blocks of candidate functions addresses:
        for l_func_addr in blockify(self._iter_func_find_candidates(),
                                    batch_size):
            # Send block to remote server and get results:
            bsimilars = self._batch_similars(fdb, l_func_addr)
            # Iterate over functions and results:
            for func_addr, similars in bsimilars:
                if len(similars) == 0:
                    # No similars found.
                    continue
                # Get the first entry (Highest similarity):
                fsim = similars[0]
                # Discard if doesn't pass the similarity cut:
                if fsim.sim_grade < similarity_cut:
                    continue
                old_name = GetFunctionName(func_addr)
                # Generate new name:
                new_name = make_fcatalog_name(fsim.name, fsim.sim_grade, func_addr)
                # If name matches old name, skip:
                if new_name == old_name:
                    continue
                # Set function to have the new name:
                make_name(func_addr, new_name)
                # Add the comments from the fcatalog entry:
                func_comment = get_func_comment(func_addr)
                func_comment_new = \
                    add_comment_fcatalog(func_comment, fsim.comment)
                set_func_comment(func_addr, func_comment_new)
                self._print('{} --> {}'.format(old_name, new_name))
        # Close db:
        fdb.close()
        self._print('Done finding similars.')
    def find_similars(self, similarity_cut, batch_size=GET_SIMILARS_BATCH_SIZE):
        """
        For each unnamed function in this database find a similar functions
        from the fcatalog remote db, and rename appropriately.
        Runs asynchronously; refuses to start while another task runs.
        """
        try:
            t = self._te.execute(self._find_similars_thread,
                                 similarity_cut, batch_size)
        except ThreadExecutorError:
            print('Another operation is currently running. Please wait.')
def clean_idb():
    """
    Clean all fcatalog marks and names from this idb.
    """
    print('Cleaning idb...')
    for func_addr in Functions():
        # Skip functions that are not fcatalog named:
        if not is_func_fcatalog(func_addr):
            continue
        print('{}'.format(GetFunctionName(func_addr)))
        # Clear function's name:
        # (presumably IDA then reverts to its default sub_ name -- confirm)
        make_name(func_addr,'')
        # Clean fcatalog comments from the function:
        func_comment = get_func_comment(func_addr)
        set_func_comment(func_addr,strip_comment_fcatalog(func_comment))
    print('Done cleaning idb.')
| xorpd/fcatalog_client | fcatalog_client/ida_client.py | ida_client.py | py | 10,885 | python | en | code | 26 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "ida_ts.GetFunctionName",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "ida_ts.get_func_length",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "threa... |
5884209191 | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 1 17:39:54 2019
@author: Akshay
"""
def is_number(s):
    """Return True when *s* parses as a float, else False."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def alpha(line):
    """Split every numeric token of *line* into individual characters,
    leaving non-numeric tokens intact; rejoin with single spaces."""
    tokens = []
    for word in line.split():
        if is_number(word):
            tokens.extend(list(word))
        else:
            tokens.append(word)
    return " ".join(tokens)
def clean_text(text):
    """Delete punctuation and special characters from *text*.

    Behavior-preserving rewrite of a long chain of str.replace calls.
    Exact duplicate deletions from the original ('"' and '*' appeared
    multiple times) were dropped -- a no-op, since deleting an
    already-deleted token does nothing.  The misleading '''not working'''
    docstring was replaced.
    """
    # Order preserved from the original.  The two-character quote pairs
    # must be removed as *pairs* (a lone curly quote survives), so
    # str.translate is not a drop-in replacement here.
    tokens = (u',', u'"', u'(', u')', u':', u"'", u'‘‘', u'’’', u"''",
              u'.', u'@', u'%', u'$', u'*', u'-', u'&', u'!', u'^', u'#',
              u'=', u'<', u'>', u'_')
    for tok in tokens:
        text = text.replace(tok, '')
    return text
def clean_lines(line):
    """Lowercase *line*, strip ASCII punctuation per-token, and split
    numeric tokens into individual digit characters."""
    # prepare a translation table to remove punctuation
    table = str.maketrans('', '', '!"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~')
    # tokenize, lowercase and de-punctuate in one pass
    words = [w.lower().translate(table) for w in line.split()]
    result = []
    for word in words:
        if is_number(word):
            result.extend(list(word))
        else:
            result.append(word)
    return ' '.join(result)
def hindi_clean(data):
    """Normalize one Hindi sentence and apply the generic cleaners.

    Returns the cleaned sentence with a trailing newline.

    NOTE(review): IndicNormalizerFactory is imported further down this
    script, *after* this function is first called -- as written top-to-
    bottom this raises NameError; the script was presumably run
    cell-by-cell in an IDE.  Confirm execution order.
    """
    input_text = data
    remove_nuktas = False
    factory = IndicNormalizerFactory()
    normalizer = factory.get_normalizer("hi", remove_nuktas)
    output_text = normalizer.normalize(input_text)
    output_text = clean_lines(output_text)
    output_text = alpha(output_text)
    return output_text + '\n'
# --- Stage 1: read and clean up to 500k Hindi sentences of 11..50 tokens ---
# NOTE(review): if the input file has fewer qualifying sentences than
# 500000, readline() returns '' forever and this loop never terminates --
# confirm the corpus size.
a = open("monolingual.hi", "r", encoding="utf-8")
#data = a.readline()
count = 0
data = ''
while count != 500000:
    try:
        sentence = a.readline()
        sentence = hindi_clean(sentence)
        if 10 < len(sentence.strip().split()) <= 50:
            data = data + sentence
            count += 1
            if count % 1000 == 0:
                print(count)
    except UnicodeDecodeError:
        pass
a.close()
with open("hindi_mono_50000_iitB.txt", "w", encoding="utf-8") as hindi:
    hindi.write(data)
with open("hindi_mono_50000_iitB.txt", "r", encoding="utf-8") as hindi:
    data = hindi.readlines()
# --- Stage 2: set up the Indic NLP library and demo the normalizer ---
# NOTE(review): these imports happen *after* hindi_clean was already
# called above; see the note on hindi_clean about execution order.
import re
INDIC_NLP_LIB_HOME = r"C:\Users\Akshay\Documents\GitHub\indic_nlp_library"
# The path to the local git repo for Indic NLP Resources
INDIC_NLP_RESOURCES = r"C:\Users\Akshay\Documents\GitHub\indic_nlp_resources"
import sys
sys.path.append('{}/src'.format(INDIC_NLP_LIB_HOME))
from indicnlp import common
common.set_resources_path(INDIC_NLP_RESOURCES)
from indicnlp import loader
loader.load()
# formating
from indicnlp.normalize.indic_normalize import IndicNormalizerFactory
input_text = data[2]
remove_nuktas = True
factory = IndicNormalizerFactory()
normalizer = factory.get_normalizer("hi", remove_nuktas)
output_text = normalizer.normalize(input_text)
print(input_text)
print(output_text)
print('Length before normalization: {}'.format(len(input_text)))
print('Length after normalization: {}'.format(len(output_text)))
line = data[1000]
# --- Stage 3: build a 50k most-frequent-word vocabulary ---
import collections
# NOTE(review): at this point `data` is a *list* (from readlines()), and
# list has no .split(); this line would raise AttributeError -- another
# sign the script was executed interactively with `data` as a str.
temp = data.split()
vocab_eng = collections.Counter(temp)
vocab_eng = zip(vocab_eng.keys(), vocab_eng.values())
vocab_eng = list(vocab_eng)
vocab_eng = sorted(vocab_eng, key=lambda x: x[1], reverse=True)
vocab_eng = vocab_eng[:50000]
vocab_eng_text = ''
for word, count in vocab_eng:
    vocab_eng_text += word + "\n"
with open("hindi_vocab.txt", "w", encoding="utf-8") as a:
    a.write(vocab_eng_text)
#def clean_lines(line):
# # prepare a translation table to remove punctuation
# table = str.maketrans('', '', '!"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~')
#
# # strip source cnn office if it exists
# """ index = line.find('PUBLISHED:')
# if index > -1:
# line = line[index+len('(CNN)'):]"""
# # tokenize on white space
# line = line.split()
# # convert to lower case
# line = [word.lower() for word in line]
# # remove punctuation from each token
# line = [w.translate(table) for w in line]
#
# temp = []
# for word in line:
# if is_number(word):
# for num in list(word):
# temp.append(num)
# else:
# temp.append(word)
# line = temp
# # store as string
#
# # remove empty strings
# #cleaned = [c for c in cleaned if len(c) > 0]
# return ' '.join(line) + '\n' | gakshaygupta/Semi-Supervised-Cross-Lingual-Text-summarization | undreamt/preprocessing 2.py | preprocessing 2.py | py | 5,534 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "indicnlp.common.set_resources_path",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "i... |
8984732684 | # pylint: disable=too-many-public-methods, too-many-arguments, fixme
"""
CVE-bin-tool tests
"""
import importlib
import os
import shutil
import sys
import tempfile
import unittest
import pytest
from cve_bin_tool.cvedb import CVEDB
from cve_bin_tool.version_scanner import VersionScanner
from .test_data import __all__ as all_test_name
from .utils import download_file, LONG_TESTS
# Directory holding the faked test binaries (not referenced by the tests
# visible in this module, but part of the test-data layout).
BINARIES_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), "binaries")
# load test data
# Import every module listed in test.test_data.__all__ once, eagerly.
test_data = list(
    map(lambda x: importlib.import_module(f"test.test_data.{x}"), all_test_name)
)
# NOTE: these are one-shot `map` iterators; each is consumed exactly once,
# by the @pytest.mark.parametrize decorators at class-definition time.
mapping_test_data = map(lambda x: x.mapping_test_data, test_data)
package_test_data = map(lambda x: x.package_test_data, test_data)
class TestScanner:
    """Runs a series of tests against our "faked" binaries.
    The faked binaries are very small c files containing the same string signatures we use
    in the cve-bin-tool. They should trigger results as if they contained the library and
    version specified in the file name.
    At this time, the tests work only in python3.
    """

    @classmethod
    def setup_class(cls):
        # Open the CVE database; refreshing it downloads from the NVD, so it
        # is gated behind the UPDATE_DB=1 environment variable.
        cls.cvedb = CVEDB()
        if os.getenv("UPDATE_DB") == "1":
            cls.cvedb.get_cvelist_if_stale()
        else:
            print("Skip NVD database updates.")
        # Instantiate a scanner
        cls.scanner = VersionScanner(should_extract=True)
        # temp dir for mapping tests
        cls.mapping_test_dir = tempfile.mkdtemp(prefix="mapping-test-")
        # temp dir for tests that require downloads
        cls.package_test_dir = tempfile.mkdtemp(prefix="package_test-")

    @classmethod
    def teardown_class(cls):
        # Remove both scratch directories created in setup_class.
        shutil.rmtree(cls.package_test_dir)
        shutil.rmtree(cls.mapping_test_dir)

    def test_false_positive(self):
        """A file containing only generic compiler-emitted strings and bare
        version numbers must not be attributed to any known product."""
        self.scanner.all_cves = []
        with tempfile.NamedTemporaryFile(
            "w+b",
            suffix="-test-false-positive.out",
            dir=self.mapping_test_dir,
            delete=False,
        ) as f:
            common_signatures = [
                # common strings generated by a compiler
                b"\x7f\x45\x4c\x46\x02\x01\x01\x03\n",
                b"GCC: (x86_64-posix-seh-rev0, Built by MinGW-W64 project) 8.1.0\n",
                b"GNU C17 8.1.0 -mtune=core2 -march=nocona -g -g -g -O2 -O2 -O2 -fno-ident -fbuilding-libgcc -fno-stack-protector\n",
                b"../../../../../src/gcc-8.1.0/libgcc/libgcc2.c\n",
                rb"C:\mingw810\x86_64-810-posix-seh-rt_v6-rev0\build\gcc-8.1.0\x86_64-w64-mingw32\libgcc\n",
                b"GCC: (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0\n",
                # bare version strings.
                b"1_0",
                b"1_2_3",
                b"1.4",
                b"1.2.3",
                b"6.7a",
                b"8.9.10-11",
                b"1-2",
                b"1-2-4",
                b"1.2.3-rc.1",
            ]
            f.writelines(common_signatures)
            filename = f.name
        # f.name is already an absolute path, so os.path.join returns it
        # unchanged here.
        for params in self.scanner.scan_file(
            os.path.join(self.mapping_test_dir, filename)
        ):
            if params:
                pytest.fail(msg=f"Checker has detected false positive: {params}")

    @pytest.mark.parametrize(
        "product, version, version_strings",
        (
            (d["product"], d["version"], d["version_strings"])
            for list_data in mapping_test_data
            for d in list_data
        ),
    )
    def test_version_mapping(self, product, version, version_strings):
        """Helper function to scan a binary and check that it contains
        certain cves for a version and doesn't contain others."""
        # Run the scan
        version_strings = list(map(lambda s: f"{s}\n".encode("ascii"), version_strings))
        # first filename will test "is" and second will test "contains"
        filenames = [
            f"-{product}-{version}.out",
            f"{'.'.join(list(product))}-{version}.out",
        ]
        for filename in filenames:
            with tempfile.NamedTemporaryFile(
                "w+b", suffix=filename, dir=self.mapping_test_dir, delete=False
            ) as f:
                # ELF magic bytes so the scanner treats the file as a binary.
                f.write(b"\x7f\x45\x4c\x46\x02\x01\x01\x03\n")
                f.writelines(version_strings)
                filename = f.name
            list_products = set()
            list_versions = set()
            expected_path = os.path.join(self.mapping_test_dir, filename)
            for scan_info in self.scanner.recursive_scan(expected_path):
                if scan_info:
                    product_info, file_path = scan_info
                    list_products.add(product_info.product)
                    list_versions.add(product_info.version)
                    assert file_path == expected_path
            assert product in list_products
            assert version in list_versions

    @pytest.mark.parametrize(
        "url, package_name, product, version",
        (
            (d["url"], d["package_name"], d["product"], d["version"])
            for list_data in package_test_data
            for d in list_data
        ),
    )
    @unittest.skipUnless(LONG_TESTS() > 0, "Skipping long tests")
    def test_version_in_package(self, url, package_name, product, version):
        """Helper function to get a file (presumed to be a real copy
        of a library, probably from a Linux distribution) and run a
        scan on it. Any test using this should likely be listed as a
        long test."""
        # get file
        # NOTE(review): this local name shadows the imported `tempfile` module
        # for the rest of the method body.
        tempfile = os.path.join(self.package_test_dir, package_name)
        download_file(url + package_name, tempfile)
        # new scanner for the new test.
        # self.scanner = VersionScanner(self.cve_scanner, should_extract=True)
        # run the tests
        list_products = set()
        list_versions = set()
        for scan_info in self.scanner.recursive_scan(tempfile):
            if scan_info:
                product_info, file_path = scan_info
                list_products.add(product_info.product)
                list_versions.add(product_info.version)
        # Make sure the product and version are in the results
        assert product in list_products
        assert version in list_versions

    def test_does_not_scan_symlinks(self):
        """ Test that the scanner doesn't scan symlinks """
        if sys.platform.startswith("linux"):
            # we can only do this in linux since symlink is privilege operation in windows
            os.symlink("non-existant-file", "non-existant-link")
            try:
                # scan_file is expected to yield nothing for a symlink.
                with pytest.raises(StopIteration):
                    next(
                        self.scanner.scan_file(
                            os.path.join(self.mapping_test_dir, "non-existant-link")
                        )
                    )
            finally:
                os.unlink("non-existant-link")

    def test_cannot_open_file(self, caplog):
        """ Test behaviour when file cannot be opened """
        with pytest.raises(StopIteration):
            next(
                self.scanner.scan_file(
                    os.path.join(self.mapping_test_dir, "non-existant-file")
                )
            )
        # NOTE(review): the arguments look reversed (searches caplog.text
        # inside "Invalid file") and str.find returns -1 (truthy) on a miss,
        # so this assertion can never fail -- confirm intent.
        assert str.find("Invalid file", caplog.text)

    def test_clean_file_path(self):
        """clean_file_path should strip the temporary-extraction prefix."""
        filepath = (
            "/tmp/cve-bin-tool/dhtei34fd/file_name.extracted/usr/bin/vulnerable_file"
        )
        expected_path = "/usr/bin/vulnerable_file"
        cleaned_path = self.scanner.clean_file_path(filepath)
        assert expected_path == cleaned_path
| chinvib66/cve-bin-tool | test/test_scanner.py | test_scanner.py | py | 7,556 | python | en | code | null | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line... |
17561063252 | # -*- coding: utf-8 -*-
import re
import os
import sys
import json
import time
import random
import urllib2
import win32gui
import win32con
import win32api
# Current path
ROOT = ""
# Log path
LOG_PATH = "log.txt"
# Replace special chars to this char in file name
NAME_FILL_CHAR = "-"
# Access random index separate
INDEX_FILL_CHAR = "~"
# config path
CONFIG_PATH = "config.json"
# system encode type
SYS_ENCODE = sys.getfilesystemencoding()
# default config
DEFAULT_CONFIG = ('{\n'
' "api_url": "http://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=8",\n'
' "picture_url_locat": "images [0] url",\n'
' "name_type": "url",\n'
' "wallpaper_fill_type": "fill",\n'
' "picture_store_path": "pics/",\n'
' "picture_postfix": ""\n'
'}')
# CurrentDir can get current path
# Because the path is different between run this script and run pyinstaller file
# this func can return the right path
def CurrentDir():
    """Return the directory containing the running program.

    When frozen to an .exe (pyinstaller), sys.argv[0] is the executable
    path, so strip the trailing file name; when running as a plain script,
    sys.path[0] already is the script's directory.
    """
    program_path = sys.argv[0]
    if not program_path.endswith('.exe'):
        return sys.path[0]
    return program_path[:program_path.rindex('\\')]
# ConcatURL can concat two url
# when the rightURL is a absolute url (such as "http://xxx.com"), it will return the rightURL directly.
def ConcatURL(leftURL, rightURL):
    """Join rightURL onto the scheme+host part of leftURL.

    If rightURL is already absolute (starts with 'http') it is returned
    untouched; otherwise only leftURL's host (everything before the first
    '/' after the scheme) is kept as the base.
    """
    if rightURL.startswith('http'):
        return rightURL
    # Search for the first path slash after "http://" (7 chars).
    slash_at = leftURL.find('/', 7)
    base = leftURL if slash_at == -1 else leftURL[:slash_at]
    separator = '' if rightURL.startswith('/') else '/'
    return base + separator + rightURL
# SlashToBacklash as its name
# This func will replace slash("/") to backlash("\") in pathStr
def SlashToBacklash(pathStr):
    """Return pathStr with every forward slash ('/') turned into a backslash."""
    return '\\'.join(pathStr.split('/'))
# ConcatPath can concatenate two path
# when the rightURL is a absolute path (such as "C:\Windows"), it will return the rightPath directly.
def ConcatPath(leftPath, rightPath):
    """Join two Windows paths with exactly one backslash between them.

    If rightPath is absolute (contains a drive colon such as 'C:') it is
    returned unchanged.
    """
    if ':' in rightPath:
        return rightPath
    joiner = '' if leftPath.endswith('\\') else '\\'
    return leftPath + joiner + rightPath.strip('\\')
# MultipleReplace will replace chars, which defined in "excludeStr", to NAME_FILL_CHAR in baseStr
# Example:
# NAME_FILL_CHAR = "-"
# > MultipleReplace("123#45*6","#*")
# "123-45-6"
#
def MultipleReplace(baseStr, excludeStr):
    """Substitute NAME_FILL_CHAR for every character listed in excludeStr.

    Example (with NAME_FILL_CHAR == "-"):
        MultipleReplace("123#45*6", "#*") -> "123-45-6"
    """
    cleaned = baseStr
    # Sequential replace (one pass per unwanted character), matching the
    # original semantics even when NAME_FILL_CHAR overlaps excludeStr.
    for unwanted in excludeStr:
        cleaned = cleaned.replace(unwanted, NAME_FILL_CHAR)
    return cleaned
# LegalizeStr can delete some special chars in "baseStr"
def LegalizeStr(baseStr):
    """Make a string safe for use as a Windows file name.

    Characters Windows forbids in file names are replaced via
    MultipleReplace (i.e. with NAME_FILL_CHAR).
    """
    forbidden = "/:*?<>|\""
    return MultipleReplace(baseStr, forbidden)
# ReadFile will return file content
def ReadFile(path):
    """Return the full text content of the file at *path*.

    Logs and exits (via Log with doExit=True) when the file is missing
    or not readable.
    """
    if not os.path.exists(path):
        Log("%s dosn't exists!" % path, 3, True)
    else:
        if not os.access(path, os.R_OK):
            Log("Can't read %s" % path, 2, True)
        # Context manager guarantees the handle is closed even if read()
        # raises (the original left the file open on error).
        with open(path, 'r') as handler:
            data = handler.read()
        return data
# WriteFile will write data to a file
def WriteFile(path, fileName, mode, data):
    """Write *data* to *fileName* inside directory *path*.

    The directory is created if missing; *mode* is passed straight to
    open() ('w', 'a', 'wb', ...). Logs and exits on permission problems.
    Returns the full path of the written file.
    """
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except OSError:
            # Narrowed from a bare except: directory creation failures
            # surface as OSError.
            Log("Can't write %s to %s" % (fileName, path), 2, True)
    else:
        if not os.access(path, os.W_OK):
            Log("Can't write %s to %s" % (fileName, path), 2, True)
    fullPath = ConcatPath(path, fileName)
    # Context manager guarantees the handle is closed even if write() raises.
    with open(fullPath, mode) as handler:
        handler.write(data)
    return fullPath
# ReadJSON is a pack of read json
# If important is True, get item failed will lead program to exit
# If important is False, get item failed will return '' and would not lead program to exit
def ReadJSON(jsonObj, key, important=True, isRandom=False):
    """Safely fetch an item from a parsed-JSON dict or list.

    important=True  -> a missing key/index is logged and the program exits.
    important=False -> a missing key/index silently yields ''.
    isRandom=True   -> for lists, ignore *key* and return a random element.
    """
    if isinstance(jsonObj, dict):
        # `key in dict` replaces dict.has_key(), which only exists on
        # Python 2; the `in` operator behaves identically on both versions.
        if key in jsonObj:
            return jsonObj[key]
        else:
            if important:
                Log("Can't get %s in JSON object!" % key, 1, True)
            else:
                return ''
    else:
        # Sequence (JSON array) access path.
        if len(jsonObj) < 1:
            if important:
                Log("The array length is 0", 1, True)
            else:
                return ''
        if isRandom:
            key = random.randrange(0, len(jsonObj))
            return jsonObj[key]
        if not isinstance(key, int):
            Log("Can't use a string key to access array item.key: %s" % key, 1, True)
        if key > len(jsonObj) - 1:
            if important:
                Log("The index is to large.key: %s" % key, 1, True)
            else:
                return ''
        else:
            return jsonObj[key]
# Log is record a log
# When doExit is True, the program will exit after log
def Log(message, code=0, doExit=False):
    """Append a timestamped message to the log file (LOG_PATH under ROOT).

    code selects the error-category prefix below; when doExit is True the
    program terminates immediately after logging.
    """
    errDict = {
        0: "Internal error: ",
        1: "Config file error: ",
        2: "Permission error: ",
        3: "File system error: "
    }
    log = time.ctime() + " " + errDict[code] + message
    # Append mode so earlier log entries are preserved across runs.
    WriteFile(ROOT, LOG_PATH, 'a', log + '\n')
    if doExit:
        sys.exit()
# LoadConfig can load a json config file
# and return an object
def LoadConfig(configFilePath):
    """Load the JSON config file and return it as a dict.

    If the file does not exist, a default config (DEFAULT_CONFIG) is
    written first. A JSON syntax error is logged and terminates the
    program (Log with doExit=True).
    """
    if not os.path.exists(configFilePath):
        Log("Config dose not exist!Creating a new config file.", 1)
        WriteFile(ROOT, CONFIG_PATH, 'w', DEFAULT_CONFIG)
    data = ReadFile(configFilePath)
    try:
        obj = json.loads(data)
    except:
        Log("Config file syntax error.",1,True)
    return obj
# Fetch can get data from url
def Fetch(url):
    """Return the raw response body of *url*.

    The connection handle is closed in a finally block so it is not
    leaked if read() raises (the original skipped close() on error).
    """
    handler = urllib2.urlopen(url)
    try:
        return handler.read()
    finally:
        handler.close()
# FindInJson can get item from json by a special way
# "location" is a new way to access json
# more detail see the README.md file
def FindInJson(json, location):
    """Walk a parsed JSON object following the space-separated *location*.

    Each token of *location* is either a dict key, or a list index wrapped
    in brackets with optional randomness (INDEX_FILL_CHAR is '~' by default):
      [3]    -> fixed index 3
      [~]    -> a random index over the whole list
      [a~b]  -> a random index in range(a, b)
    See the README for the full syntax.

    NOTE(review): the parameter name `json` shadows the imported json module
    inside this function (harmless here since the module is not used).
    """
    steps = location.split(' ')
    restObj = json
    for step in steps:
        isRandomm = False
        if step == ("[" + INDEX_FILL_CHAR + "]"):
            # Bare [~]: delegate the random pick to ReadJSON(isRandom=True).
            step = 0
            isRandomm = True
        elif step.startswith('[') and step.endswith(']'):
            index = step.strip("[").rstrip("]")
            if index.find(INDEX_FILL_CHAR) != -1:
                # Ranged random index [min~max]; a missing min defaults to 0.
                indexList = index.split(INDEX_FILL_CHAR)
                minIndex = 0
                maxIndex = 0
                try:
                    if indexList[0] != '':
                        minIndex = int(indexList[0])
                    maxIndex = int(indexList[1])
                    step = random.randrange(minIndex, maxIndex)
                except:
                    # Malformed range: log it and fall back to index 0.
                    Log('locat [] in config file are error.', 1)
                    step = 0
            else:
                step = int(index)
        restObj = ReadJSON(restObj, step, isRandom=isRandomm)
    return restObj
# set wallpaper at windows
# set wallpaper at windows
def SetWallpaper(imagePath, fillType='fill'):
    """Set the Windows desktop wallpaper via the per-user registry.

    fillType is one of 'fill', 'fit', 'Stretch', 'center', 'span', 'tile'.
    NOTE(review): the 'Stretch' key is capitalized unlike the others, so
    passing 'stretch' raises KeyError -- confirm the intended spelling.
    """
    tile = "0"
    if fillType == "tile":
        # Tiling is expressed as TileWallpaper=1 plus a centered style.
        fillType = "center"
        tile = "1"
    # WallpaperStyle registry values for each fill mode.
    fillDict = {
        "fill": "10",
        "fit": "6",
        "Stretch": "2",
        "center": "0",
        "span": "22"
    }
    style = fillDict[fillType]
    key = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER,
                                r"Control Panel\Desktop", 0,
                                win32con.KEY_SET_VALUE)
    win32api.RegSetValueEx(key, "WallpaperStyle", 0, win32con.REG_SZ, style)
    win32api.RegSetValueEx(key, "TileWallpaper", 0, win32con.REG_SZ, tile)
    # Flags 1 + 2 = SPIF_UPDATEINIFILE | SPIF_SENDCHANGE.
    win32gui.SystemParametersInfo(
        win32con.SPI_SETDESKWALLPAPER, imagePath, 1 + 2)
# main script: fetch a picture URL from the configured JSON API, download
# the image into the store directory, and set it as the desktop wallpaper.
if __name__ == "__main__":
    # Init root path
    ROOT = CurrentDir() + "\\"
    # Load config
    config = LoadConfig(CONFIG_PATH)
    # If 'name_fill_char' dose not exist in config file it will use the default 'NAME_FILL_CHAR'
    if ReadJSON(config, 'name_fill_char', important=False) != '':
        NAME_FILL_CHAR = ReadJSON(config, 'name_fill_char', important=False)
    # Get api url from config
    api = ReadJSON(config, 'api_url')
    # Get data from url
    data = Fetch(api)
    # load api content to json obj
    try:
        obj = json.loads(data)
    except:
        Log("The API content is not recognised to be a json type.")
    # # Get picture url from json with the special syntax
    picURL = FindInJson(obj, ReadJSON(config, 'picture_url_locat'))
    # Concat url can avoid the different between "http://xxx.com/xx.jpg" and "/xx.jpg"
    pic_host = ReadJSON(config, 'picture_url_host', important=False)
    if pic_host != "":
        picURL = ConcatURL(pic_host, picURL)
    else:
        picURL = ConcatURL(api, picURL)
    picName = ""
    # get nameType from config
    nameType = ReadJSON(config, 'name_type')
    # process different nameType
    # When the locator contains a random index, non-'url' name types fall
    # back to time-based naming (the picked item varies between runs).
    if ReadJSON(config, 'picture_url_locat').find(INDEX_FILL_CHAR) != -1:
        if nameType != 'url':
            nameType = 'time'
    if nameType == "url":
        # Name = last path segment of the URL, query string stripped.
        if picURL.find("?") == -1:
            picName = picURL[picURL.rindex("/") + 1:]
        else:
            picName = picURL[picURL.rindex("/") + 1:picURL.index("?")]
    elif nameType == "json":
        # Name taken from another field of the API response.
        picName = FindInJson(obj, ReadJSON(config, 'name_locat'))
        picName = MultipleReplace(picName, ReadJSON(config, 'name_exclude_char'))
        try:
            picName.encode(SYS_ENCODE)
        except:
            Log(('The name has special charactor, '
                 'you should use "name_exclude_char" in the config'
                 ' to exclude it.'), 1, True)
    elif nameType == 'time':
        # Millisecond timestamp as the file name.
        picName = str(int(time.time() * 1000))
    else:
        Log("Don't support this name_type.", 1, True)
    picName = LegalizeStr(picName)
    # Get picture data
    picData = Fetch(picURL)
    fullPath = WriteFile(
        ConcatPath(ROOT, SlashToBacklash(ReadJSON(config, 'picture_store_path'))),
        picName + ReadJSON(config, 'picture_postfix'), 'wb', picData)
    # Set wallpaper
    SetWallpaper(fullPath, ReadJSON(config, 'wallpaper_fill_type'))
| AielloChan/pywallpaper | main.py | main.py | py | 9,893 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "sys.getfilesystemencoding",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "sys.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists"... |
32995011041 | """
Test executors logic.
"""
import yaml
from unittest import TestCase
from voluptuous import Schema
from six import StringIO
from mock import (
patch,
call,
MagicMock,
)
from plix.executors import (
BaseExecutor,
ShellExecutor,
)
from .common import MockDisplay
class ExecutorsTests(TestCase):
    """Unit tests for plix's BaseExecutor / ShellExecutor behaviour."""

    def test_executors_are_yaml_representable(self):
        """An executor with options serializes as {'name': ..., 'options': ...}."""
        class MyExecutor(BaseExecutor):
            options_schema = Schema({'foo': str})
            # Fix the module so the dumped dotted name is deterministic.
            __module__ = 'my_module'

        executor = MyExecutor(options={'foo': 'bar'})
        self.assertEqual(
            yaml.dump({
                'name': 'my_module.MyExecutor',
                'options': {
                    'foo': 'bar',
                }
            }),
            yaml.dump(executor),
        )

    def test_executors_are_yaml_representable_without_options(self):
        """With empty options, an executor serializes to just its dotted name."""
        class MyExecutor(BaseExecutor):
            options_schema = Schema({'foo': str})
            __module__ = 'my_module'

        executor = MyExecutor(options={})
        self.assertEqual(
            yaml.dump('my_module.MyExecutor'),
            yaml.dump(executor),
        )

    def test_base_executors_requires_a_returncode(self):
        """execute() must raise RuntimeError if execute_one returns no code."""
        class MyExecutor(BaseExecutor):
            def execute_one(self, environment, command, output):
                # Deliberately returns None instead of a return code.
                pass

        display = MockDisplay()
        executor = MyExecutor()
        with self.assertRaises(RuntimeError):
            executor.execute(
                environment={},
                commands=['a'],
                display=display,
            )

    def test_base_executors_returns_true_on_success(self):
        """A zero return code from every command yields an overall True."""
        class MyExecutor(BaseExecutor):
            def execute_one(self, environment, command, output):
                return 0

        display = MockDisplay()
        executor = MyExecutor()
        status = executor.execute(
            environment={},
            commands=['a'],
            display=display,
        )
        self.assertTrue(status)

    def test_base_executors_returns_false_on_failure(self):
        """A non-zero return code yields an overall False."""
        class MyExecutor(BaseExecutor):
            def execute_one(self, environment, command, output):
                return 1

        display = MockDisplay()
        executor = MyExecutor()
        status = executor.execute(
            environment={},
            commands=['a'],
            display=display,
        )
        self.assertFalse(status)

    def test_shell_executor_execute(self):
        """ShellExecutor must run each command through subprocess.Popen with
        the given environment and report start/stop/output to the display."""
        environment = {"FOO": "BAR"}
        commands = ["alpha", "beta"]
        display = MockDisplay()
        executor = ShellExecutor()

        def Popen(command, env=None, *args, **kwargs):
            # Fake subprocess: echoes the command back as stdout, exits 0,
            # and asserts the environment was forwarded unchanged.
            process = MagicMock()
            process.stdout = StringIO(command)
            process.returncode = 0
            self.assertEqual(environment, env)
            return process

        with patch('subprocess.Popen', side_effect=Popen):
            executor.execute(
                environment=environment,
                commands=commands,
                display=display,
            )

        # Commands are reported in order with 0-based indices.
        self.assertEqual(
            display.start_command.mock_calls,
            [
                call(index=0, command="alpha"),
                call(index=1, command="beta"),
            ],
        )
        self.assertEqual(
            display.stop_command.mock_calls,
            [
                call(index=0, command="alpha", returncode=0),
                call(index=1, command="beta", returncode=0),
            ],
        )
        self.assertEqual(
            display.command_output.mock_calls,
            [
                call(0, "alpha"),
                call(1, "beta"),
            ],
        )
| freelan-developers/plix | tests/test_executors.py | test_executors.py | py | 3,697 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "plix.executors.BaseExecutor",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "voluptuous.Schema",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "yaml.... |
34101450986 | #!/usr/bin/python3
import pandas as pd
import numpy as np
import seaborn as sns
import statsmodels.formula.api as sm
import pylab as plt
import argparse
import os
import matplotlib
from matplotlib.gridspec import GridSpec
from pathlib import Path
from tqdm import tqdm
plt.style.use('bmh')
HOME = str(Path.home())
# Base output directory for all generated regression plots.
PLOT_DIR = os.path.join(HOME, 'source/presentationEAVM/plots')
def RAD(deg):
    """Convert an angle given in degrees to radians."""
    scaled = deg * np.pi
    return scaled / 180.0
def CREATE_STATS_PLOTS(num_seg, out_dir):
    """Fit Voltage ~ WT by OLS for one LV segment and save a summary figure.

    The figure stacks a regression plot (top 2/3) over the residual
    distribution (bottom 1/3) and adds a stats box (r2, slope, p, N).
    Relies on the module-global `data_lv` DataFrame assigned in __main__.
    The figure is written to <out_dir>/<num_seg>.png.
    """
    df = data_lv.loc[data_lv.Segm == num_seg]
    result = sm.ols(formula="Voltage ~ WT", data=df).fit()
    fig = plt.figure(figsize=(5, 7))
    gs = GridSpec(3, 1)
    ax2 = fig.add_subplot(gs[2, 0])
    ax1 = fig.add_subplot(gs[0:2, 0])
    fig.tight_layout(w_pad=8)
    ax1.set_title('Regression plot for {} segment'.format(num_seg))
    sns.distplot(result.resid, ax=ax2, axlabel='Residuals distribution')
    sns.regplot(x='WT', y='Voltage', data=df, ax=ax1, x_jitter=.1, x_estimator=np.mean)
    # NOTE(review): GET_R2 and GET_SLOPE refit the same OLS model, so the
    # regression is computed three times per call.
    box_text = 'r2 = {}\nslope = {}\np = {}\nNum. observations = {}'.format(round(GET_R2(num_seg), 5),
                                                                            round(GET_SLOPE(num_seg), 5),
                                                                            round(result.pvalues['WT'], 10),
                                                                            int(result.nobs))
    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
    ax1.text(0.05, 0.75, box_text, transform=ax1.transAxes, bbox=props)
    plt.savefig(os.path.join(out_dir, '{}.png'.format(num_seg)))
    plt.close()
def GET_R2(num_seg):
    """Return the OLS r-squared of Voltage ~ WT for one segment.

    Uses the module-global `data_lv` DataFrame assigned in __main__.
    """
    df = data_lv.loc[data_lv.Segm == num_seg]
    result = sm.ols(formula="Voltage ~ WT", data=df).fit()
    return result.rsquared
def GET_SLOPE(num_seg):
    """Return the fitted WT slope of Voltage ~ WT for one segment.

    Uses the module-global `data_lv` DataFrame assigned in __main__.
    """
    df = data_lv.loc[data_lv.Segm == num_seg]
    result = sm.ols(formula="Voltage ~ WT", data=df).fit()
    return result.params.WT
if __name__ == '__main__':
    # Either -f (single csv) or -d (directory of csvs) must be given.
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', type=str, help='.csv file with data')
    parser.add_argument('-d', type=str, help='directory with *data*.csv')
    args = parser.parse_args()
    if args.f is None and args.d is None:
        raise ValueError('No path to data')
    elif args.d is None:
        # Single-file mode: one plot per segment (1..17) into a sub-folder
        # named after the csv file.
        data_lv = pd.read_csv(args.f, index_col=0)
        folder = args.f.split('/')[-1].split('.')[0]
        PLOT_DIR = os.path.join(PLOT_DIR, folder)
        if not os.path.isdir(PLOT_DIR):
            os.mkdir(PLOT_DIR)
        for n in tqdm(range(1, 18)):
            CREATE_STATS_PLOTS(n, PLOT_DIR)
    elif args.f is None:
        # Directory mode: one sub-folder of plots per csv file found at the
        # top level of the directory.
        files = list(os.walk(args.d))[0][2]
        for f in tqdm(files):
            data_lv = pd.read_csv(os.path.join(args.d, f), index_col=0)
            folder = f.split('/')[-1].split('.')[0]
            out_dir = os.path.join(PLOT_DIR, folder)
            if not os.path.isdir(out_dir):
                os.mkdir(out_dir)
            for n in range(1, 18):
                CREATE_STATS_PLOTS(n, out_dir)
{
"api_name": "pylab.style.use",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pylab.style",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path.home",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
... |
16538220342 | """
Functionality for working with plasma density data created by the
NURD algorithm
See Zhelavskaya et al., 2016, doi:10.1002/2015JA022132
"""
import os
import glob
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from spacepy import pycdf
def read_cdf(filename, \
             pathname='/Users/mystical/Work/Science/RBSP/DataCdf/rbsp-a/nurd/'):
    """
    read_cdf(filename, \
             pathname='/Users/mystical/Work/Science/RBSP/DataCdf/rbsp-a/nurd/')
    Read NURD cdf file as downloaded from:
        ftp://rbm.epss.ucla.edu/ftpdisk1/NURD
    Input: filename string, pathname string
    Output: nurd dataframe with columns ['UT' [datetime], 'timestamp',
            'L', 'MLT', 'MLAT', 'R', 'ne'] otherwise -1 if cdf file
            not found.
    """
    fullpath_filename = pathname + filename
    if not os.path.isfile( fullpath_filename ):
        print("**** File Not Found: " + fullpath_filename)
        return -1
    data = pycdf.CDF(fullpath_filename)
    nurd = pd.DataFrame( data['Epoch'][:], columns=['UT'])
    # POSIX seconds derived from the UT datetime column.
    nurd['timestamp'] = nurd['UT'].apply( lambda x: time.mktime(x.timetuple()) )
    nurd['L'] = data['L'][:]
    nurd['MLT'] = data['MLT'][:]
    nurd['MLAT'] = data['magLat'][:]
    # Geocentric distance from the SM cartesian coordinates in the file.
    nurd['R'] = np.sqrt(data['x_sm'][:]**2 + data['y_sm'][:]**2 + \
                        data['z_sm'][:]**2)
    nurd['ne'] = data['density'][:]
    # Drop undefined location values
    nurd.drop(np.where( np.isnan(nurd.L) )[0], inplace=True)
    # Store metadata as attributes
    # DOESN'T ACTUALLY WORK, GETS ERASED BY OTHER PANDAS FUNCTIONS
    nurd.spacecraft = 'RBSP-a'
    data.close()
    return nurd
def find_psphere(ne_eq, L):
    """Classify each point as inside (True) or outside (False) the plasmasphere.

    The plasmapause is modelled as a density of 100 cm^-3 at L=4, scaled
    as L^-4. Negative and NaN densities are treated as inside (True).

    Inputs: ne_eq, L arrays of equatorial density values and L-shell
    Output: boolean array, True where the point lies in the plasmasphere
    """
    boundary_density = 100.0 * (4.0 / L) ** 4
    inside = ne_eq >= boundary_density
    # Sentinel/undefined densities default to "inside".
    inside = np.where(ne_eq < 0, True, inside)
    inside = np.where(np.isnan(ne_eq), True, inside)
    return inside
def denton_ne_eq( ne, L, R ):
    """
    denton_ne_eq( ne, L, R ):
    Estimates the equatorial density, ne_eq, based on L and R
    using the Denton et al., 2002 formulation.
    Outside the model's validity range (R < 2, L < 2.5, ne > 1500, or an
    estimate exceeding the measurement) the raw ne is returned unchanged.
    Inputs: arrays of ne, L, R
    Output: array of ne_eq
    """
    # Silence invalid-value warnings (log10 of ne <= 0, NaN comparisons),
    # then RESTORE the previous settings: the original called np.seterr
    # without ever restoring, permanently mutating NumPy's global error
    # state for the rest of the process.
    old_settings = np.seterr( invalid = 'ignore' )
    try:
        log_ne = np.log10( ne )
        alpha_ne = 6.0 - 3.0*log_ne + 0.28*log_ne**2.0
        alpha_L = 2.0-0.43*L
        ne_eq = ne/np.power((L/R),(alpha_ne+alpha_L))
        # Denton forumulation for ne_eq only valid over certain ranges
        # of L, R and ne; otherwise stick with ne
        ne_eq = np.where( np.logical_or(R < 2.0, L < 2.5), ne, ne_eq)
        ne_eq = np.where( np.logical_or(ne > 1500.0, ne_eq > ne ), ne, ne_eq)
        return ne_eq
    finally:
        np.seterr(**old_settings)
def plot_density_psphere( nurd ):
    """
    plot_density_psphere( nurd )
    Make a plot of density as a function of L, color-coded by
    inside/outside plasmasphere (blue = plasmasphere, red = plasmatrough).
    Expects the 'psphere' and 'ne_eq' columns added by load_day().
    """
    fig = plt.figure()
    ax = plt.gca()
    ax.scatter( nurd[nurd.psphere].L, nurd[nurd.psphere].ne_eq, \
                color='b', s=2, label='Plasmasphere')
    ax.scatter( nurd[nurd.psphere==False].L, nurd[nurd.psphere==False].ne_eq, \
                color='r', s=2, label='Plasmatrough')
    ax.set_yscale('log')
    ax.set_xlabel('L');
    ax.set_ylabel('Equatorial Electron Density, cm^-3')
    # NOTE(review): nurd.spacecraft is an ad-hoc attribute set in read_cdf
    # and, per the comment there, may be erased by intervening pandas
    # operations -- this line can then raise AttributeError.
    ax.set_title(nurd.spacecraft + ' ' + \
                 nurd.UT.iloc[0].strftime('%Y-%m-%d %H:%M:%S') + ' - ' + \
                 nurd.UT.iloc[-1].strftime('%Y-%m-%d %H:%M:%S') + ' UT')
    ax.legend()
    ax.set_ylim( min(nurd.ne_eq), max(nurd.ne_eq) )
    plt.show(block=False)
    plt.ion()
    return
def load_day( datestr ):
    """
    load_day( datestr )
    Given a datestring in the form 'YYYYMMDD', read the nurd cdf file,
    and find the intervals inside the plasmasphere.
    Output: nurd dataframe with columns ['UT' [datetime], 'timestamp',
            'L', 'MLT', 'MLAT', 'R', 'ne', 'ne_eq', 'psphere']
            otherwise -1 if cdf file not found
    """
    pathname='/Users/mystical/Work/Science/RBSP/DataCdf/rbsp-a/nurd/'
    filename_start = 'rbsp-a_'
    filename_end = '.cdf'
    # Wildcard because file names carry a version suffix,
    # e.g. rbsp-a_20140613_v1_3.cdf.
    filename_wild = filename_start + datestr + '*' + filename_end
    filename = glob.glob( pathname + filename_wild )
    if not filename:
        print('NURD file not found: ' + pathname + filename_wild)
        return -1
    else:
        # Remove pathname and read cdf file
        filename = filename[0].replace(pathname, '')
        nurd = read_cdf(filename)
    # Derive the equatorial density and the plasmasphere flag columns.
    nurd['ne_eq'] = denton_ne_eq(nurd['ne'], nurd['L'], nurd['R'] );
    nurd['psphere'] = find_psphere( nurd['ne_eq'], nurd['L'] );
    return nurd
# SOME TESTING AND DEBUGGING
#filename = 'rbsp-a_20140613_v1_3.cdf';
#nurd = read_cdf( filename )
#nurd['ne_eq'] = denton_ne_eq(nurd['ne'], nurd['L'], nurd['R'] );
#nurd['psphere'] = find_psphere( nurd['ne_eq'], nurd['L'] );
#plot_density_psphere(nurd)
#nurd = load_day('20131010')
#plot_density_psphere(nurd)
#plt.close()
#nurd.loc[nurd['ne']==min(nurd['ne']), 'ne']=-1
#plt.plot(nurd.UT, nurd['ne']/max(nurd['ne']), '.')
#plt.plot(nurd.UT, nurd.psphere, '.')
#plt.plot(nurd.UT, nurd['L']/max(nurd['L']), '.')
#
#plt.show(block=False)
| mariaspasojevic/PyRBSP | nurdpy.py | nurdpy.py | py | 5,014 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.isfile",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "spacepy.pycdf.CDF",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "spacepy.pycdf",
"li... |
15467387894 | from openpyxl.styles import Font, Border, Side, PatternFill, Alignment
from openpyxl import load_workbook
# Apply fonts, borders, conditional fill and frozen panes to a sample
# workbook, then save it under a new name.
wb = load_workbook("/Users/gunju/Desktop/self study/python/deep/rpa_basic/1_excel/sample.xlsx")
ws = wb.active
# The three header cells that get individual font styling.
a1 = ws["A1"]
b1 = ws["B1"]
c1 = ws["C1"]
ws.column_dimensions["A"].width = 5  # set column A width to 5
ws.row_dimensions[1].height = 50  # set row 1 height to 50
a1.font = Font(color = "FF0000", italic = True, bold = True)  # red text, italic, bold
b1.font = Font(color = "CC33FF", name = "Arial", strike = True)  # Arial font, strikethrough
c1.font = Font(color = "0000FF", size = 20, underline = "single")  # size 20, single underline (use "double" for two lines)
# Apply a thin border on all four sides of the header cells.
thin_border = Border(left = Side(style = "thin"), right = Side(style = "thin"), top = Side(style = "thin"), bottom = Side(style = "thin"))
a1.border = thin_border
b1.border = thin_border
c1.border = thin_border
# Color cells scoring above 90 green.
for row in ws.rows:
    for cell in row:
        # center every cell / left, right, top, bottom also possible
        cell.alignment = Alignment(horizontal = "center", vertical = "center")
        # Skip the first column (names, not scores).
        # NOTE(review): assumes cell.column yields the 1-based column index
        # (openpyxl >= 2.6); older versions returned the letter, e.g. 'A'.
        if cell.column == 1:
            continue
        # If the cell holds integer data greater than 90
        if isinstance(cell.value, int) and cell.value > 90:
            cell.fill = PatternFill(fgColor = "00FF00", fill_type = "solid")
            cell.font = Font(color = "FF0000")
# Freeze panes: row 1 and column A stay visible while scrolling.
ws.freeze_panes = "B2"
wb.save("/Users/gunju/Desktop/self study/python/deep/rpa_basic/1_excel/sample_style.xlsx")
{
"api_name": "openpyxl.load_workbook",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Font",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Font",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "openp... |
72871607463 | import numpy as np
import os
import torch
from tqdm import tqdm
from PIL import Image
import wandb
from .evaluation import eval_llh, eval_translation_error, rotation_model_evaluation, translation_model_evaluation
# Global variables
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def cosine_decay(step, hyper_param):
    """Learning-rate schedule: linear warmup followed by cosine decay.

    The rate ramps linearly from 0 over hyper_param['warmup_steps'] steps,
    then decays from hyper_param['learning_rate'] to 0 along half a cosine
    period ending at hyper_param['num_train_iter'].
    """
    warmup = hyper_param["warmup_steps"]
    total = hyper_param['num_train_iter']
    warmup_factor = min(step, warmup) / warmup
    progress = max(step - warmup, 0) / (total - warmup)
    return hyper_param['learning_rate'] * warmup_factor * (1 + np.cos(progress * np.pi)) / 2
def run_single_epoch(model, data_loader, hyper_param, num_iter, mode=0, optimizer=None):
    """Run one training epoch over `data_loader` and return the mean loss.

    mode 0 trains on the 3x3 rotation part of the object pose; any other
    mode trains on the 3-vector translation part.
    NOTE(review): `optimizer` defaults to None but is used unconditionally
    below -- callers must always pass one.
    """
    progress_bar = tqdm(enumerate(data_loader), total=num_iter)
    epoch_losses = []
    #ipdb.set_trace()
    for (i, input_) in progress_bar:
        #load the images from the batch
        img = input_['image']
        img = img.to(DEVICE).float()
        # Extract the supervision target from the 4x4 pose matrix.
        if mode==0:
            ground_truth = input_['obj_pose_in_camera'][:,:3,:3].to(DEVICE).float()
        else:
            ground_truth = input_['obj_pose_in_camera'][:,:3,-1].to(DEVICE).float()
        loss = model.compute_loss(img, ground_truth)
        epoch_losses.append(loss.item())
        #update the learning rate using the cosine decay
        for g in optimizer.param_groups:
            lr = cosine_decay(i+1, hyper_param)
            g['lr'] = lr
        #backward pass through the network
        optimizer.zero_grad()
        loss.backward()
        # Gradient clipping at norm 1 for training stability.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.)
        optimizer.step()
        # NOTE(review): breaking when i == num_iter processes num_iter + 1
        # batches (i is 0-based) -- confirm whether that is intended.
        if i == num_iter:
            break
    mean_epoch_loss = np.mean(epoch_losses)
    return mean_epoch_loss
def run_rotation_training(model, train_dataset, val_dataset, optimizer, hyper_param, checkpoint_dir, start_epoch=0):
    """In each epoch the model is trained for num_train_iter iterations defined in the configuration.
    The evaluation step includes calculating the loglikelihood and the spread on a num_eval_step iterations.
    The model is saved in pre-defined intervals, defined in the configuration. If additionally the full evaluation is enabled,
    also all defined evaluation metrics are calculated.
    `val_dataset` is an iterable of validation datasets (one llh per dataset).
    """
    train_losses = []
    loglikelihood = []
    # NOTE(review): mean_errors and median_errors are never appended to
    # anywhere in this function, so the final prints always show [].
    mean_errors = []
    median_errors = []
    num_epochs = hyper_param['num_epochs']
    for epoch in range(start_epoch+1, num_epochs+1):
        # training
        model.train()
        #run a single training epoch
        train_loss = run_single_epoch(model=model,
                                      data_loader=train_dataset,
                                      hyper_param=hyper_param,
                                      num_iter=hyper_param['num_train_iter'],
                                      mode=0,
                                      optimizer=optimizer)
        # validation
        model.eval()
        with torch.no_grad():
            # One loglikelihood value per validation dataset (rotation mode).
            llh = []
            for dataset in val_dataset:
                llh.append(eval_llh(model, dataset=dataset,
                                    num_eval_iter=hyper_param['num_val_iter'],
                                    mode=0,
                                    device=DEVICE))
            if epoch==1:
                # Baseline full evaluation before substantial training.
                rotation_model_evaluation(model=model, dataset=val_dataset, hyper_param_rot=hyper_param)
        train_losses.append(train_loss)
        loglikelihood.append(llh)
        # log the loss values (llh averaged over the validation datasets)
        wandb.log({
            'TrainLoss': train_loss,
            'Loglikelihood': sum(llh)/len(llh)
        })
        print("Epoch:", epoch, "....", "TrainLoss: ", train_loss, "Loglikelihood: ", llh)
        # save a checkpoint
        if ((epoch % hyper_param['save_freq'] == 0) and epoch>=hyper_param['start_save']) or epoch == num_epochs:
            chkpt_name = f'checkpoint_{epoch}.pth' if epoch != num_epochs else f'checkpoint_final.pth'
            chkpt_path = os.path.join(checkpoint_dir, chkpt_name)
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                },
                chkpt_path
            )
            # For every saved model run a full evaluation if full_evaluation is set to true
            if hyper_param['full_eval']==True and epoch % hyper_param["eval_freq"]==0:
                rotation_model_evaluation(model=model, dataset=val_dataset, hyper_param_rot=hyper_param)
    print("Training finished.")
    print("The final IPDF model was saved to: ", os.path.join(checkpoint_dir, 'checkpoint_final.pth'))
    print("\nFinal evaluation metrics:\n")
    print("Train Loss:\n", train_losses)
    print("\nLoglikelihood:\n", loglikelihood)
    print("\nMean Error:\n", mean_errors)
    print("\nMedian Error:\n", median_errors)
def run_translation_training(model, train_dataset, val_dataset, optimizer, hyper_param, checkpoint_dir, start_epoch=0):
    """Run the training loop for a translation IPDF model.

    In each epoch the model is trained for num_train_iter iterations defined in the
    configuration. The evaluation step includes calculating the loglikelihood and,
    every eval_freq epochs (and on the first epoch), the translation estimate error.
    Checkpoints are written every save_freq epochs once start_save is reached, and
    always on the final epoch.

    Parameters
    ----------
    model : torch.nn.Module to train.
    train_dataset / val_dataset : iterables yielding training / validation batches.
    optimizer : torch optimizer updating the model parameters.
    hyper_param : dict of configuration values (num_epochs, num_train_iter,
        num_val_iter, eval_freq, save_freq, start_save, batch_size_val, ...).
    checkpoint_dir : directory checkpoints are written to.
    start_epoch : last completed epoch; training resumes at start_epoch + 1.
    """
    num_epochs = hyper_param['num_epochs']
    for epoch in range(start_epoch+1, num_epochs+1):
        # training
        model.train()
        # run a single training epoch
        train_loss = run_single_epoch(model=model,
                                      data_loader=train_dataset,
                                      hyper_param=hyper_param,
                                      num_iter=hyper_param['num_train_iter'],
                                      mode=1,
                                      optimizer=optimizer)
        # validation
        llh = eval_llh(model, dataset=val_dataset,
                       num_eval_iter=hyper_param['num_val_iter'],
                       mode=1,
                       device=DEVICE)
        model.eval()
        error = 0.0
        # The (expensive) translation error is only computed at eval_freq
        # intervals and on the very first epoch.
        if epoch % hyper_param["eval_freq"] == 0 or epoch == 1:
            error = eval_translation_error(model, dataset=val_dataset,
                                           batch_size=hyper_param['batch_size_val'],
                                           eval_iter=hyper_param['num_val_iter'],
                                           gradient_ascent=True)
            # log the loss values
            wandb.log({
                'TrainLoss': train_loss,
                'Loglikelihood': llh,
                'EstimateError': error
            })
        else:
            wandb.log({
                'TrainLoss': train_loss,
                'Loglikelihood': llh
            })
        print("Epoch:", epoch, "....", "TrainLoss: ", train_loss, " Loglikelihood: ", llh, " Estimate Error: ", error)
        # save a checkpoint
        if ((epoch % hyper_param['save_freq'] == 0) and epoch >= hyper_param['start_save']) or epoch == num_epochs:
            # The final checkpoint gets a fixed name so downstream tooling can find it.
            chkpt_name = f'checkpoint_{epoch}.pth' if epoch != num_epochs else 'checkpoint_final.pth'
            chkpt_path = os.path.join(checkpoint_dir, chkpt_name)
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            },
                chkpt_path
            )
        # Full per-checkpoint evaluation is currently disabled. The original
        # disabled it with a bare string literal (a no-op expression evaluated
        # every epoch); kept here as real comments instead:
        # if hyper_param['full_eval'] == True and epoch % hyper_param["eval_freq"] == 0:
        #     translation_model_evaluation(model=model, dataset=val_dataset)
    print("Training finished.")
    print("The final IPDF model was saved to: ", os.path.join(checkpoint_dir, 'checkpoint_final.pth'))
| LDenninger/se3_pseudo_ipdf | se3_ipdf/training.py | training.py | py | 7,997 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.device",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
... |
263755309 | """
https://leetcode.com/problems/sum-of-all-odd-length-subarrays/
Given an array of positive integers arr, calculate the sum of all possible odd-length subarrays.
A subarray is a contiguous subsequence of the array.
Return the sum of all odd-length subarrays of arr.
"""
"""
For array of size x, start with x subarrays
If array.length>2, there are array.length-2 new subarrays of size 3
If array.length>4, there are array.length-4 new subarrays of size 5
In general, for each odd length n, there are m subarrays of size n,
where m is equal to the array length-n+1.
Add the subarrays.
Add the sums.
"""
from typing import List
class Solution:
    def sumOddLengthSubarrays(self, arr: List[int]) -> int:
        """Return the sum of all odd-length contiguous subarrays of *arr*.

        Instead of materialising every subarray (O(n^3) overall, since each
        slice sum is O(n)), count each element's contribution directly:
        arr[i] belongs to (i + 1) * (n - i) subarrays in total, and
        ((i + 1) * (n - i) + 1) // 2 of those have odd length.
        Runs in O(n) time and O(1) extra space; returns 0 for an empty list.
        """
        n = len(arr)
        total = 0
        for i, value in enumerate(arr):
            occurrences = (i + 1) * (n - i)
            odd_occurrences = (occurrences + 1) // 2
            total += odd_occurrences * value
        return total
{
"api_name": "typing.List",
"line_number": 22,
"usage_type": "name"
}
] |
32948325933 | import sys
#sys.path.insert(0, "/home/tomohiro/workspace/web_scraping_venv/lib/python3.6/site-packages")
from flask import Flask, render_template, request
from flask_socketio import SocketIO, emit, send
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from bs4 import BeautifulSoup
import requests
import re
import sys
import time
import datetime
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
app.config["MONGO_URI"] = "mongodb://localhost:27017/scrapingdb"
# async_mode=None lets Flask-SocketIO pick the best available async backend.
async_mode = None
ping_timeout = 300
socketio = SocketIO(app, async_mode=async_mode, ping_timeout=ping_timeout)
# If the connection gets disconnected in the middle, a new session is created
# and the server stops sending data back to the original session's client, so
# ping_timeout is set long to keep the Socket.IO connection from dropping.
mongo = PyMongo(app)
bookmarks = mongo.db.bookmarks  # MongoDB collection of saved bookmarks
# Handle used to ensure only a single background scraping task is started.
thread = None
# Snapshot of all saved bookmarks, loaded once at startup (served on /bookmark/).
bk_list = [s for s in bookmarks.find()]
# @app.route("/", methods=["POST"])
# def test_db():
# url = request.form["mongo"]
# url = {"subject": url}
# urls.insert_one(url)
# result = mongo.db.urls.find({"subject": url})
# return render_template(("base.html"), urls=result)
@app.route("/", methods=["GET"])
def index_return():
#デフォルト設定でFlaskはhtmlの所在を、flaskアプリケーションと同じ階層にある
#templatesディレクトリの中に探しに行くので下記のようなパスでよい
return render_template("base.html")
@app.route("/itmedia/")
def itmedia():
return render_template("itmedia/itmain.html")
@app.route("/nikkei/")
def nikkei():
return render_template("nikkei/nkmain.html")
@app.route("/other/")
def other():
return render_template("other/otmain.html")
@app.route("/bookmark/")
def bookmark():
global bk_list
return render_template("bookmark/bkmain.html", list=bk_list)
@socketio.on('bk_db')
def managebk(bk_dict):
    """Persist a bookmarked article ({url, head}) sent over Socket.IO.

    The headline arrives escape-encoded, so it is round-tripped back to
    UTF-8 before being stored.
    """
    url = bk_dict["url"]
    head = bk_dict["head"]
    head = head.encode("raw_unicode_escape")
    head = head.decode("utf-8")
    bks = {"url": url, "head": head}
    # Upsert keyed on the headline with $setOnInsert so an already-saved
    # bookmark is neither duplicated nor modified.
    # Collection.update() was deprecated (and removed in PyMongo 4);
    # update_one() is the supported call for a single-document upsert.
    bookmarks.update_one(
        {'head': head},
        {'$setOnInsert': bks},
        upsert=True)
@socketio.on("message")
def initial_response(msg):
send("Hello!")
print(msg)
@socketio.on("myevent")
def handle_message(array):
url = array["url"]
word = array["head"]
#if re.match(r"(^\\[A-Z].*[a-z].*)?", word):
# 日本語だと強制的にutf-8?にエンコードされるので、それをutf-8に再度デコード
word = word.encode("raw_unicode_escape")
word = word.decode("utf-8")
#Getting the global variable, thread
#You can always default the thread value as None by doing this
global thread
if thread is None:
#Starting the function, "sending_results" as background task
#By doing this, you can devide the threads having the funcion to keep sokcket connection(main thread)
#and one to send the url and keyword
socketio.start_background_task(target=sending_results(url, word))
def sending_results(url, word):
    """Scrape *url* for article links whose text contains *word* and stream results.

    Collects every anchor on the start page, normalises each href to an
    absolute URL, then visits each candidate page and emits a Socket.IO
    "myresponse" event ({"head": headline, "url": article}) for every matching
    headline. Emits "startcomplete" with "DONE" when finished. Exits the
    worker via sys.exit() on empty arguments or an unreachable start URL.
    """
    start = time.time()
    print(url)
    print(word)
    if url == "" or word == "":
        print("”URL” と ”キーワード” を指定してください")
        send("”URL” と ”キーワード” を指定")
        sys.exit()
    if not url.endswith("/"):
        url = url + "/"
    try:
        res = requests.get(url)
    except Exception as e:
        print(e)
        print("URLが正しくありません")
        send("URLが正しくありません")
        send(str(e))
        sys.exit()
    # Starting web scraping
    emit("startcomplete",("Executing scraping"))
    soup = BeautifulSoup(res.content, "html.parser")
    a_tag_list = soup.find_all("a")
    protocol = re.search(".*:", url).group(0)
    url_list = []
    # Calling url_list.append through attribute lookup on every iteration is
    # slow, so bind the bound method once before the loop.
    # (Comment translated from Japanese.)
    append=url_list.append
    for a in a_tag_list:
        a_link = str(a.get("href"))
        if not re.match(".*/.*", a_link):
            # hrefs with no slash at all (fragments, mailto, etc.) are skipped.
            print("This hasn't been added to the list " + a_link)
        elif a_link.startswith("//"):
            # Protocol-relative link: prepend the start page's scheme.
            full_url = protocol + a_link
            append(full_url)
        elif a_link.startswith("/") or a_link.startswith("./"):
            #Removing "./" or "/" from a_link and conbining it with url
            full_url = url + re.sub(r".?/", "", a_link, count=1)
            append(full_url)
        #Checking if a_link starts with alphanumeric + "/"
        elif re.match(r"^\w+/", a_link):
            full_url = url + a_link
            append(full_url)
        else:
            append(a_link)
    # Reset the append alias (translated from Japanese).
    append=list.append
    # Keep unique candidate pages, excluding direct article/query URLs.
    perfect_url_list =sorted(set([u for u in url_list if not re.match(".*article.*|.*=.*", u )]))
    tmp_list = []
    append=tmp_list.append
    #global bk_list
    for l in perfect_url_list:
        try:
            res = requests.get(l)
        except Exception as e:
            print(e)
        #print("before for loop of a")
        soup = BeautifulSoup(res.content, "html.parser")
        a_tag_list = soup.find_all("a")
        for a in a_tag_list:
            try:
                a_link = str(a.get("href"))
                # Strip bullet characters, ideographic spaces and whitespace
                # control characters from the anchor text.
                a_text = re.sub("・|\u3000|\n|\r|\t", "", a.text)
                # tmp_list de-duplicates headlines already sent to the client.
                if re.match(".*{}.*".format(word), a.text) and not a_text in tmp_list:
                    if not re.match(".*/.*", a_link):
                        pass
                    elif a_link.startswith("//"):
                        article = protocol + a_link
                    elif a_link.startswith("/") or a_link.startswith("./"):
                        article = l + re.sub(r".?/", "", a_link, count=1)
                        # Fall back to resolving against the start URL if the
                        # page-relative guess does not exist.
                        res = requests.get(article)
                        if res.status_code != 200:
                            article = url + re.sub(r".?/", "", a_link, count=1)
                    elif re.match(r"^\w+/", a_link):
                        article = l + a_link
                        res = requests.get(article)
                        if res.status_code != 200:
                            article = url + a_link
                    else:
                        article = a.get("href")
                    append(a_text)
                    a_dict_list = {
                        "head": a_text,
                        "url": article,
                        #"sentense": get_p(article)
                    }
                    emit("myresponse", a_dict_list)
            except Exception as e:
                print(e)
                print("Nothing to be sent to you")
    emit("startcomplete", "DONE")
    print("DONE")
    elapsed = time.time() - start
    print(elapsed)
    append=list.append
if __name__ == "__main__":
#socketio = SocketIO(app) により、socketio.run(app)でflaskアプリを起動できる
socketio.run(app)
# @app.route("/next", methods=["POST"])
# def arg_check():
# if request.method == "POST":
# url = request.form["url"]
# keyword = request.form["search"]
# args = url + keyword
# return render_template("index.html", result=args)
# @app.route("/", methods=["POST"])
# def start_scraping():
# if request.method == "POST":
# url = request.form["url"]
# word = request.form["search"]
# return render_template("index.html", result=scraping(url,word))
# @app.route("/", methods=["POST"])
# def start_socketio():
# def ack():
# print("I got your message")
# @socketio.on("my event")
# def handle_message(json):
# time.sleep(1)
# emit("my response", json, callback=ack)
# print("Replied to the clinet")
| tomohiro3/scraping | work/flask_script.py | flask_script.py | py | 7,063 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask_socketio.SocketIO",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask_pymongo.PyMongo",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flask.rende... |
25569986801 | from json import load
from tkinter.font import BOLD
from openpyxl import load_workbook
from openpyxl.styles import PatternFill, Font
def edm(filename):
    """Annotate an eDM (email campaign) workbook in place with a Status column.

    Reads the 'Recipients' sheet and the 'Link Clicks - Detail' sheet,
    classifies every recipient as Clicked (optionally unsubscribed), Opened,
    Sent, Soft Bounced or Hard Bounced, and saves the workbook back to
    *filename*. Column meanings (B = email, C = group, D = delivery status,
    H = open indicator, K = unsubscribe flag) are taken from the workbook
    layout this script was written for — confirm against the actual export.
    """
    wb = load_workbook(filename)
    sheet = wb['Recipients']
    sheet2 = wb['Link Clicks - Detail']
    # Deleting empty rows
    sheet.delete_rows(1,1)
    sheet.delete_rows(2,1)
    # Deleting invalid Group Names
    count = 0
    for row in sheet['C']:
        count += 1
        if row.value == "Reagan_Toh" or row.value == "Seow_Ying":
            # NOTE(review): deleting rows while iterating the same column
            # shifts the indices of the remaining rows, so a second match may
            # delete the wrong rows — confirm against a workbook containing
            # several invalid-group rows.
            sheet.delete_rows(count,2)
    # Insert Status column
    sheet.insert_cols(5)
    sheet['E1'].value = "Status"
    sheet['E1'].font = Font(bold=True)
    sheet['E1'].fill = PatternFill("solid", fgColor="FFFF00")
    # Clicked but unsubbed
    count = 0
    for row in sheet['K']:
        count += 1
        if row.value == "Y":
            sheet[f'E{count}'].value = "1. Clicked (Unsub)"
    # Clicked
    # Getting unique clicked emails
    clicked = []
    count = 0
    for row in sheet2['B']:
        # Exclude header and blank rows
        count += 1
        if count >= 4:
            clicked.append(row.value)
    clicked = set(clicked)
    # Check if specific emails clicked the eDM
    count = 0
    for row in sheet['B']:
        count += 1
        for item in clicked:
            if item == row.value:
                sheet[f'E{count}'].value = "1. Clicked"
    # Opened eDMs
    # Only rows not already classified (Status still empty) are updated.
    count = 0
    for row in sheet['H']:
        count += 1
        if row.value and sheet[f'E{count}'].value == None:
            sheet[f'E{count}'].value = "2. Opened"
    # Soft Bounced and Hard Bounced
    count = 0
    for row in sheet['D']:
        count += 1
        if (row.value == 'Remote Server Error' or row.value == 'Error (C)') and sheet[f'E{count}'].value == None:
            sheet[f'E{count}'].value = "4. Soft Bounced"
        elif row.value == 'Bad Address' and sheet[f'E{count}'].value == None:
            sheet[f'E{count}'].value = "5. Hard Bounced"
        elif row.value == 'Sent' and sheet[f'E{count}'].value == None:
            sheet[f'E{count}'].value = "3. Sent"
    # Save workbook
    wb.save(filename=filename)
    wb.close()
{
"api_name": "openpyxl.load_workbook",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Font",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.PatternFill",
"line_number": 28,
"usage_type": "call"
}
] |
74362480745 | import scipy as sp
import numpy as np
import math
import input_checks
import ode_solver
def sparse_A(size, dm, do):
    """
    Creates a sparse matrix used by the scipy spsolve function.
    ----------
    Parameters
    size : int
        The square dimension value of the matrix.
    dm : float OR int
        Value along the main diagonal.
    do : float OR int
        Value along both the offset diagonals.
    ----------
    Returns
    A scipy sparse csr_matrix.
    """
    # Constant-valued diagonals (float, matching np.ones(...) * value).
    main_diag = np.full(size, dm, dtype=float)
    off_diag = np.full(size - 1, do, dtype=float)
    return sp.sparse.diags([main_diag, off_diag, off_diag],
                           offsets=[0, 1, -1], format='csr')
def full_A(size, dm, do):
    """
    Creates a tridiagonal matrix.
    ----------
    Parameters
    size : int
        The square dimension value of the matrix.
    dm : float OR int
        Value along the main diagonal.
    do : float OR int
        Value along both the offset diagonals.
    ----------
    Returns
    A np.ndarray tridiagonal matrix.
    """
    # Fill the three diagonals of a zero matrix in place instead of summing
    # three np.diag(...) matrices.
    mat = np.zeros((size, size))
    rows = np.arange(size)
    mat[rows, rows] = dm
    mat[rows[:-1], rows[:-1] + 1] = do
    mat[rows[1:], rows[1:] - 1] = do
    return mat
def solve_diffusion(method, l_bound_type, r_bound_type, l_bound_func, r_bound_func, init_func, D, x_min, x_max, nx, t_min, t_max, nt, source_func=None, l_bound_args=None, r_bound_args=None, init_args=None, source_args=None, use_sparse=True):
    """
    Solves the diffusion equation using finite difference methods, based on the specified method and boundary conditions.
    ----------
    Parameters
    method : string
        Either 'lines', 'explicit_euler', 'implicit_euler', 'crank_nicolson', or 'imex'.
    l_bound_type : string
        Either 'dirichlet', 'neumann', or 'robin'.
    r_bound_type : string
        Either 'dirichlet', 'neumann', or 'robin'.
    l_bound_func : function
        Function that takes values (x, u, t) and list (args) as inputs and returns the left boundary value.
    r_bound_func : function
        Function that takes singular values (x, u, t) and list (args) as inputs and returns the right boundary value.
    init_func : function
        Function that takes (x, u, t) and list (args) as inputs and returns intitial solution array.
    D : float OR int
        Diffusion Coefficient.
    x_min : float OR int
        Minimum x value.
    x_max : float OR int
        Maximum x value.
    nx : int
        Number of x values in the grid, affects step size used for x.
    t_min : float OR int
        Minimum t value.
    t_max : float OR int
        Maximum t value.
    nt : int
        Number of t values in the grid, affects step sized used for t.
    source_func : function
        Function that takes singular values (x, t, u) and list (args) as inputs and returns source value.
    l_bound_args : list OR numpy.ndarray
        Additional arguments needed by 'l_bound_func'. If (l_bound_type) is 'robin', must contain two values [delta, gamma], such that r_bound_func returns delta - gamma*u(x).
    r_bound_args : list OR numpy.ndarray
        Additional arguments needed by 'r_bound_func'. If (r_bound_type) is 'robin', must contain two values [delta, gamma], such that r_bound_func returns delta - gamma*u(x).
    init_args : list OR numpy.ndarray
        Additional arguments needed by 'init_func'.
    source_args : list OR numpy.ndarray
        Additional arguments needed by 'source_func'.
    use_sparse : bool
        True indicates that sparse matrices are used for calculations.
    ----------
    Returns
    A numpy.array with a row of values for each solved parameter, and the final row being the x-values solved at.
    """
    # INPUT CHECKS
    input_checks.test_string(method, 'method')
    if method not in ['lines', 'explicit_euler', 'implicit_euler', 'crank_nicolson']:
        # NOTE(review): the docstring also advertises 'imex', but it is
        # rejected here — confirm whether 'imex' support was intended.
        raise Exception('Argument (method) must be either \'lines\', \'explicit_euler\', \'implicit_euler\', \'crank_nicolson\'.')
    input_checks.test_string(l_bound_type, 'l_bound_type')
    if l_bound_type not in ['dirichlet', 'neumann', 'robin']:
        raise Exception('Argument (boundary_type) must be either \'dirichlet\', \'neumann\', or \'robin\'.')
    input_checks.test_string(r_bound_type, 'r_bound_type')
    if r_bound_type not in ['dirichlet', 'neumann', 'robin']:
        raise Exception('Argument (boundary_type) must be either \'dirichlet\', \'neumann\', or \'robin\'.')
    input_checks.test_function(l_bound_func, 'l_bound_func')
    input_checks.test_function(r_bound_func, 'r_bound_func')
    input_checks.test_function(init_func, 'init_func')
    input_checks.test_float_int(D, 'D')
    input_checks.test_float_int(x_min, 'x_min')
    input_checks.test_float_int(x_max, 'x_max')
    input_checks.test_int(nx, 'nx')
    input_checks.test_float_int(t_min, 't_min')
    input_checks.test_float_int(t_max, 't_max')
    input_checks.test_int(nt, 'nt')
    if source_func != None:
        input_checks.test_function(source_func, 'source_func')
    if l_bound_args != None:
        input_checks.test_list_nparray(l_bound_args, 'l_bound_args')
        if l_bound_type == 'robin':
            if len(l_bound_args) != 2:
                raise Exception('Argument (l_bound_args) must contain two values.')
    if r_bound_args != None:
        input_checks.test_list_nparray(r_bound_args, 'r_bound_args')
        if r_bound_type == 'robin':
            if len(r_bound_args) != 2:
                raise Exception('Argument (r_bound_args) must contain two values.')
    if init_args != None:
        input_checks.test_list_nparray(init_args, 'init_args')
    if source_args != None:
        input_checks.test_list_nparray(source_args, 'source_args')
    if use_sparse not in [True, False]:
        raise Exception('Argument (use_sparse) must be a boolean True or False.')
    # MEETING STABILITY CONDITION
    dx = (x_max - x_min) / nx
    dt = (t_max - t_min) / nt
    C = (dt * D) / (dx ** 2)
    if method == 'explicit_euler' or method == 'lines':
        # Explicit time-stepping is only stable for C <= 0.5, so shrink dt
        # (and recompute nt and C) to sit just under that limit.
        dt = 0.49*(dx**2)/D
        nt = math.ceil((t_max - t_min) / dt)
        C = (dt * D) / (dx ** 2)
    # ADJUST SOURCE TERM
    if source_func == None:
        # Default to a zero source so later code can call it unconditionally.
        def source_func(x, t, u, args):
            return np.zeros(np.size(x))
    # CONSTRUCT GRIDS
    x_arr = np.linspace(x_min, x_max, nx+1)
    t_arr = np.linspace(t_min, t_max, nt+1)
    # DEFINE x_int AND INITIALISE u_t
    # Dirichlet ends are fixed values, so they are excluded from the solved
    # interior unknowns.
    if l_bound_type == 'dirichlet':
        xs = 1
    else:
        xs = 0
    if r_bound_type == 'dirichlet':
        x_int = x_arr[xs:-1]
    else:
        x_int = x_arr[xs:]
    u_t = init_func(x_int, 0, 0, init_args)
    size = len(u_t)
    # CREATE MATRICES
    if use_sparse == True:
        I_mat = sp.sparse.identity(size, format='csr')
        A_mat = sparse_A(size, -2, 1)
    elif use_sparse == False:
        I_mat = np.identity(size)
        A_mat = full_A(size, -2, 1)
    # DEFINE RHS VECTOR AND MODIFY SPARSE MATRICIES ACCORDING TO BOUNDARY CONDITIONS
    # Neumann/Robin ends are handled with a ghost-node formulation, which
    # doubles the first/last off-diagonal entry; Robin additionally scales
    # the corner diagonal entry by (1 + gamma*dx).
    if l_bound_type != 'dirichlet':
        A_mat[0, 1] *= 2
        if l_bound_type == 'robin':
            A_mat[0, 0] *= 1 + l_bound_args[1] * dx
    if r_bound_type != 'dirichlet':
        A_mat[-1, -2] *= 2
        if r_bound_type == 'robin':
            A_mat[-1, -1] *= 1 + r_bound_args[1] * dx
    def make_b(t):
        # Boundary contribution vector added to the discretised Laplacian at
        # time t; only the first and last entries can be non-zero.
        b = np.zeros(size)
        if l_bound_type == 'dirichlet':
            b[0] = l_bound_func(x_min, t, u_t, l_bound_args)
        elif l_bound_type == 'neumann':
            b[0] = l_bound_func(x_min, t, u_t, l_bound_args) * -2 * dx
        elif l_bound_type == 'robin':
            b[0] = l_bound_func(x_min, t, u_t[0], [l_bound_args[0], 0]) * -2 * dx
        if r_bound_type == 'dirichlet':
            b[-1] = r_bound_func(x_max, t, u_t, r_bound_args)
        elif r_bound_type == 'neumann':
            b[-1] = r_bound_func(x_max, t, u_t, r_bound_args) * 2 * dx
        elif r_bound_type == 'robin':
            b[-1] = r_bound_func(x_max, t, u_t[-1], [r_bound_args[0], 0]) * 2 * dx
        return b
    # SOLVE
    if use_sparse == True:
        solver = sp.sparse.linalg.spsolve
    elif use_sparse == False:
        solver = np.linalg.solve
    if method == 'lines':
        # Method of lines: discretise in space, then hand the resulting ODE
        # system to the project's ODE solver.
        def PDE(u, t, args):
            A_mat, b = args
            return (C/dt) * (A_mat@u + b + (dx**2) * (source_func(x_int, t, u, source_args)))
        b = make_b(t_max)
        sol = ode_solver.solve_to(PDE, u_t, t_min, t_max, dt, args=[A_mat, b])
        u_t = sol[:,-1][:-1]
    if method == 'explicit_euler':
        for j in range(0, nt):
            b = make_b(t_arr[j])
            u_t = solver(I_mat, u_t + C*(A_mat@u_t + b + (dx**2) * (source_func(x_int, t_arr[j], u_t, source_args))))
    if method == 'implicit_euler':
        for j in range(0, nt):
            b = make_b(t_arr[j])
            u_t = solver(I_mat - (C*A_mat), u_t + C * (b + (dx**2) * (source_func(x_int, t_arr[j], u_t, source_args))))
    if method == 'crank_nicolson':
        for j in range(0, nt):
            b = make_b(t_arr[j])
            u_t = solver(I_mat - ((C/2)*A_mat), (I_mat + ((C/2)*A_mat))@u_t + C * (b + (dx**2) * (source_func(x_int, t_arr[j], u_t, source_args))))
    # MODIFY u_t ACCORDING TO BOUNDARY CONDITIONS
    # Re-attach the fixed Dirichlet end values before returning.
    if l_bound_type == 'dirichlet':
        u_t = np.concatenate((np.array([l_bound_func(x_min, 0, 0, l_bound_args)]), u_t))
    if r_bound_type == 'dirichlet':
        u_t = np.concatenate((u_t, np.array([r_bound_func(x_max, 0, 0, r_bound_args)])))
    return np.vstack([u_t, x_arr])
| jack-parr/scientific_computing | pde_solver.py | pde_solver.py | py | 9,406 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.ones",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.diags",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.diag",
"lin... |
3458942753 | import collections
import pandas as pd
import re
import pdftotext
import requests
from static.constants import FILE_PATH
class DrugsFDA:
    """Resolve the most recent label document for an FDA application number.

    Joins the Drugs@FDA documents and applications tables, selects the newest
    label-type (ApplicationDocsTypeID == '2') document for the requested
    application, and downloads/parses its PDF into a DrugsFDAPdf.
    FDA_Application_Number stays None when no label document is found.
    """

    def __init__(self, fda_app_num):
        self.FDA_Application_Number = None
        drugs_fda_df = pd.read_csv(FILE_PATH.DRUGSFDA_ORIGIN_DOCS, encoding="ISO-8859-1", delimiter='\t', dtype=str)
        drugs_fda_application_df = pd.read_csv(FILE_PATH.DRUGSFDA_ORIGIN_APPLICATION, encoding="ISO-8859-1",
                                               delimiter='\t',
                                               dtype=str)
        drugs_fda_df = pd.merge(drugs_fda_df, drugs_fda_application_df, how='left', on='ApplNo')
        # Full application number = application type prefix + number.
        drugs_fda_df['FullApplNo'] = drugs_fda_df.ApplType + drugs_fda_df.ApplNo
        drugs_fda_df.ApplicationDocsDate = pd.to_datetime(drugs_fda_df.ApplicationDocsDate).dt.date
        # Filter to this application's label documents, newest first.
        drug_df = drugs_fda_df[(drugs_fda_df.FullApplNo == fda_app_num) & (drugs_fda_df.ApplicationDocsTypeID == '2')] \
            .sort_values(['ApplicationDocsDate'], ascending=False)
        if drug_df.shape[0] > 0:
            drug = drug_df.iloc[0]
            self.FDA_Application_Number = fda_app_num
            self.Docs_Date = drug.ApplicationDocsDate
            self.Docs_Url = drug.ApplicationDocsURL
            self.Pdf = DrugsFDAPdf(get_pdf(self.Docs_Url))
def get_pdf(url):
    """Download the PDF at *url* and return its text, or '' on any failure.

    The response is staged on disk because pdftotext reads from a file
    object; pages are joined with blank lines so downstream code can split
    on paragraphs.
    """
    try:
        r = requests.get(url)
        # Context managers close both handles (the original leaked the read
        # handle on success and on failure).
        with open('templbl.pdf', 'wb') as fd:
            fd.write(r.content)
        with open('templbl.pdf', 'rb') as f:
            pdf = pdftotext.PDF(f)
        return '\n\n'.join(pdf)
    except Exception:
        # Narrowed from a bare except (which also swallowed SystemExit and
        # KeyboardInterrupt); any download/parse error means "no label text".
        return ''
def build_dict_helper(title_idx_list, content_list):
    """Slice *content_list* into sections keyed by heading title.

    Each (index, title) pair in *title_idx_list* marks a heading line inside
    *content_list*; the value stored for a title is every line after that
    heading up to (but excluding) the next heading (or the end of the list).
    """
    sections = {}
    # Section boundaries: each heading's index, plus a sentinel at the end.
    boundaries = [idx for idx, _ in title_idx_list] + [len(content_list)]
    for pos, (start, title) in enumerate(title_idx_list):
        sections[title] = content_list[start + 1:boundaries[pos + 1]]
    return sections
def group_sentence_helper(sentence_list):
    """Merge consecutive line fragments into full sentences.

    A fragment is treated as a sentence terminator when it is short (five
    words or fewer) or ends with '.' or ':'. Fragments between terminators
    are joined with spaces; a trailing run with no terminator is joined and
    appended as-is. Returns [] when no terminator exists at all.
    """
    end_list = [(i, item) for i, item in enumerate(sentence_list)
                if len(item.split(' ')) <= 5 or item[-1] == '.' or item[-1] == ':']
    group_sentence = []
    if len(end_list) > 0:
        for i, end in enumerate(end_list):
            if i == 0:
                group_sentence.append(' '.join(sentence_list[:end[0] + 1]))
            else:
                group_sentence.append(' '.join(sentence_list[end_list[i - 1][0] + 1:end[0] + 1]))
        # Bug fix: compare the *index* of the last terminator to the last
        # position. The original compared the whole (index, text) tuple to an
        # int, which is always unequal, so a spurious '' entry was appended
        # whenever the list ended on a terminator.
        if end_list[-1][0] != len(sentence_list) - 1:
            group_sentence.append(' '.join(sentence_list[end_list[-1][0] + 1:]))
    return group_sentence
def print_dict(dict):
    """Flatten a {heading: fragment-list} mapping, sorted by heading, into a
    list of heading strings each followed by their grouped sentences.

    NOTE(review): the parameter name shadows the builtin ``dict``; left
    unchanged here to keep the signature identical for existing callers.
    """
    ordered_dict = collections.OrderedDict(sorted(dict.items()))
    dict_content_list = []
    for dict_key, dict_value in ordered_dict.items():
        dict_content_list.append(dict_key)
        dict_content_list = dict_content_list + group_sentence_helper(dict_value)
    return dict_content_list
class DrugsFDAPdf:
    """Parsed drug-label PDF text, split into the standard FDA label sections.

    The raw text is organised into a nested dict (section -> subsection ->
    lines) by build_pdf_content_dict, and the commonly used sections are then
    exposed as flat string attributes by __init__ (paragraphs joined with
    blank lines).
    """

    # Lines matching any of these are noise (page furniture) and are dropped.
    filter_pattern = ['^Reference ID:', '^[0-9]+$', '^Page', '^$']
    # Level-1 section headings of the Full Prescribing Information.
    title_pattern = ['^FULL PRESCRIBING INFORMATION$',
                     '^\d+\s+INDICATIONS AND USAGE$',
                     '^\d+\s+DOSAGE AND ADMINISTRATION$',
                     '^\d+\s+DOSAGE FORMS AND STRENGTHS$',
                     '^\d+\s+CONTRAINDICATIONS$',
                     '^\d+\s+WARNINGS AND PRECAUTIONS$',
                     '^\d+\s+ADVERSE REACTIONS$',
                     '^\d+\s+DRUG INTERACTIONS$',
                     '^\d+\s+USE IN SPECIFIC POPULATIONS$',
                     '^\d+\s+DRUG ABUSE AND DEPENDENCE$',
                     '^\d+\s+OVERDOSAGE$',
                     '^\d+\s+DESCRIPTION$',
                     '^\d+\s+CLINICAL PHARMACOLOGY$',
                     '^\d+\s+NONCLINICAL TOXICOLOGY$',
                     '^\d+\s+CLINICAL STUDIES$',
                     '^\d+\s+HOW SUPPLIED/STORAGE AND HANDLING$',
                     '^\d+\s+PATIENT COUNSELING INFORMATION$',
                     '^Medication Guide$',
                     '^MEDICATION GUIDE$']
    # Candidate level-3 headings inside a Pharmacokinetics subsection
    # (ADMEF: absorption / distribution / metabolism / excretion / food
    # effect, plus population-specific sub-headings).
    admef_pattern = ['^absorption$',
                     '^distribution$',
                     '^metabolism$',
                     '^excretion$',
                     '^elimination$',
                     '^effect of food$',
                     '^effects of food$',
                     '^food effect$',
                     '^food effects$',
                     'table',
                     'figure',
                     'population',
                     'impairment',
                     'geriatric',
                     'pediatric',
                     'gender',
                     'race',
                     'ethnicity',
                     'interaction',
                     'interactions',
                     'racial',
                     'age',
                     'populations',
                     'weight',
                     'half-life']
    # Core ADME terms used to confirm a heading list is really an ADME block.
    adme_pattern = ['absorption', 'distribution', 'metabolism', 'excretion', 'elimination']

    def build_pdf_content_dict(self, pdf_content):
        """Split raw label text into a nested {section: {subsection: lines}} dict.

        Level-1 keys are the numbered top-level headings, level-2 keys the
        "<n>.<m> Title" subsections, and — inside a Pharmacokinetics
        subsection — level-3 keys are the detected ADMEF headings.
        """
        pdf_content_dict = {}
        pdf_content_list = pdf_content.split('\n')
        # Normalise whitespace, then drop page-furniture lines.
        pdf_content_list = list(map(lambda s: re.sub(r'\s+', ' ', s).strip(), pdf_content_list))
        pdf_content_list = list(filter(lambda s: not re.search(r'|'.join(self.filter_pattern), s), pdf_content_list))
        title_idx_list = [(i, item) for i, item in enumerate(pdf_content_list) if
                          re.search(r'|'.join(self.title_pattern), item)]
        # level 1 title
        if pdf_content_list and len(title_idx_list) > 0:
            pdf_content_dict = build_dict_helper(title_idx_list, pdf_content_list)
        # level 2 title
        # (Re-assigning existing keys during iteration is safe: the key set
        # never changes.)
        for key, value in pdf_content_dict.items():
            if re.search(r'^\d+', key):
                section_number = re.search(r'^\d+', key).group()
                sub_title_idx_list = [(i, item) for i, item in enumerate(value) if
                                      re.search(rf'^{section_number}\.\d+\s+.+$', item)]
                if len(sub_title_idx_list) > 0:
                    pdf_content_dict[key] = build_dict_helper(sub_title_idx_list, value)
                    pharmacokinetics = list(
                        filter(lambda t: 'pharmacokinetics' in t[1].lower(), sub_title_idx_list))
                    # level 3 title: ADMEF
                    if len(pharmacokinetics) > 0:
                        admef_content_list = pdf_content_dict[key][pharmacokinetics[0][1]]
                        # Heading candidates: short, no comma, no trailing
                        # period, and matching an ADMEF keyword.
                        admef_idx_list = [(i, item) for i, item in enumerate(admef_content_list)
                                          if len(item.split(' ')) <= 10
                                          and ',' not in item
                                          and item[-1] != '.'
                                          and re.search(r'|'.join(self.admef_pattern), re.sub(r'[^a-z ]+', '', item.lower().strip()))]
                        # Only treat the candidates as headings if at least
                        # one core ADME term is among them.
                        if len(list(filter(lambda item: re.search(r'|'.join(self.adme_pattern), item[1].lower()),
                                           admef_idx_list))) > 0:
                            pdf_content_dict[key][pharmacokinetics[0][1]] = build_dict_helper(admef_idx_list, admef_content_list)
        return pdf_content_dict

    def dict_value_helper(self, path):
        """Walk *path* (case-insensitive regex keys) through the nested
        content dict and return the matched section as one string ('' if the
        path cannot be fully resolved)."""
        current_dict = self.pdf_content_dict
        found = False
        value = ''
        for i, key in enumerate(path):
            if type(current_dict) is dict:
                for dict_key, dict_value in current_dict.items():
                    if re.search(rf'{key.lower()}', dict_key.lower()):
                        current_dict = dict_value
                        if i == len(path) - 1:
                            found = True
                        break
            else:
                # Hit a leaf (list) before the path was exhausted.
                return value
        if found:
            if type(current_dict) is list:
                value = '\n\n'.join(group_sentence_helper(current_dict))
            else:
                value = '\n\n'.join(print_dict(current_dict))
        return value

    def pharmacokinetics_non_admef(self):
        """Return the Pharmacokinetics paragraphs that were not claimed by any
        of the ADMEF attributes (and that do not mention an ADME keyword)."""
        pharmacokinetics_list = self.Pharmacokinetics.split('\n\n')
        pharmacokinetics_list = [x for x in pharmacokinetics_list if x not in self.Absorption.split('\n\n')]
        pharmacokinetics_list = [x for x in pharmacokinetics_list if x not in self.Distribution.split('\n\n')]
        pharmacokinetics_list = [x for x in pharmacokinetics_list if x not in self.Metabolism.split('\n\n')]
        pharmacokinetics_list = [x for x in pharmacokinetics_list if x not in self.Excretion.split('\n\n')]
        pharmacokinetics_list = [x for x in pharmacokinetics_list if x not in self.Food_Effect.split('\n\n')]
        pharmacokinetics_list = [x for x in pharmacokinetics_list if not re.search(r'|'.join(self.adme_pattern + ['^effect of food', '^effects of food', '^food effect', '^food effects']), x.lower())]
        return '\n\n'.join(pharmacokinetics_list)

    def __init__(self, pdf_content):
        """Parse *pdf_content* and populate the flat section attributes."""
        self.pdf_content_dict = self.build_pdf_content_dict(pdf_content)
        self.Boxed_Warning = self.dict_value_helper(['FULL PRESCRIBING INFORMATION'])
        self.Indication = self.dict_value_helper(['INDICATIONS AND USAGE'])
        self.Dosage_Administration = self.dict_value_helper(['DOSAGE AND ADMINISTRATION'])
        self.Pregnancy = self.dict_value_helper(['USE IN SPECIFIC POPULATIONS', 'Pregnancy'])
        self.Lactation = self.dict_value_helper(['USE IN SPECIFIC POPULATIONS', 'Lactation'])
        self.Nursing_Mother = self.dict_value_helper(['USE IN SPECIFIC POPULATIONS', 'Nursing Mother'])
        self.Females_Males_of_Reproductive_Potential = self.dict_value_helper(['USE IN SPECIFIC POPULATIONS',
                                                                               'Females and Males of Reproductive Potential|Females & Males of Reproductive Potential'])
        self.Mechanism_of_Action = self.dict_value_helper(['CLINICAL PHARMACOLOGY', 'Mechanism of Action'])
        self.Pharmacodynamics = self.dict_value_helper(['CLINICAL PHARMACOLOGY', 'Pharmacodynamics'])
        self.Pharmacokinetics = self.dict_value_helper(['CLINICAL PHARMACOLOGY', 'Pharmacokinetics'])
        self.Absorption = self.dict_value_helper(['CLINICAL PHARMACOLOGY', 'Pharmacokinetics', 'Absorption'])
        self.Distribution = self.dict_value_helper(['CLINICAL PHARMACOLOGY', 'Pharmacokinetics', 'Distribution'])
        self.Metabolism = self.dict_value_helper(['CLINICAL PHARMACOLOGY', 'Pharmacokinetics', 'Metabolism'])
        # self.Elimination = self.dict_value_helper(['CLINICAL PHARMACOLOGY', 'Pharmacokinetics', 'Elimination'])
        # Labels use "Excretion" or "Elimination" for the same content, so
        # both headings are folded into Excretion.
        self.Excretion = self.dict_value_helper(['CLINICAL PHARMACOLOGY', 'Pharmacokinetics', 'Excretion'])
        self.Excretion += self.dict_value_helper(['CLINICAL PHARMACOLOGY', 'Pharmacokinetics', 'Elimination'])
        self.Food_Effect_1 = self.dict_value_helper(
            ['CLINICAL PHARMACOLOGY', 'Pharmacokinetics', 'food effect|food effects|effect of food|effects of food'])
        self.Food_Effect_2 = ''
        self.Cytotoxic = self.dict_value_helper(['NONCLINICAL TOXICOLOGY', 'Carcinogenesis'])
        self.How_Supplied = self.dict_value_helper(['HOW SUPPLIED'])
        self.Information_for_Patients = self.dict_value_helper(['PATIENT COUNSELING INFORMATION'])
        # Fallback: some labels write ADMEF as inline "Absorption: ..." /
        # "Absorption — ..." paragraphs instead of headings; harvest those
        # only when the corresponding attribute is still empty.
        if self.Pharmacokinetics:
            paragraph_list = self.Pharmacokinetics.split('\n\n')
            for paragraph in paragraph_list:
                if re.search(r'^absorption\s*(:|—|-|–)', paragraph.lower()) and not self.Absorption:
                    title = re.split(':|—|-|–', paragraph)[0]
                    self.Absorption = paragraph[len(title)+1:].strip()
                if re.search(r'^distribution\s*(:|—|-|–)', paragraph.lower()) and not self.Distribution:
                    title = re.split(':|—|-|–', paragraph)[0]
                    self.Distribution = paragraph[len(title)+1:].strip()
                if re.search(r'^metabolism\s*(:|—|-|–)', paragraph.lower()) and not self.Metabolism:
                    title = re.split(':|—|-|–', paragraph)[0]
                    self.Metabolism = paragraph[len(title)+1:].strip()
                if re.search(r'^(excretion|elimination)\s*(:|—|-|–)', paragraph.lower()) and not self.Excretion:
                    title = re.split(':|—|-|–', paragraph)[0]
                    self.Excretion = paragraph[len(title)+1:].strip()
                if re.search(r'^(food effect|food effects|effect of food|effects of food)\s*(:|—|-|–)', paragraph.lower()) and not self.Food_Effect_1:
                    title = re.split(':|—|-|–', paragraph)[0]
                    self.Food_Effect_1 = paragraph[len(title)+1:].strip()
        # If the food-effect text was nested inside Absorption, move it out.
        # NOTE(review): list.remove() inside iteration skips the element after
        # the removed one; acceptable only if at most one paragraph matches.
        if self.Absorption:
            paragraph_list = self.Absorption.split('\n\n')
            for paragraph in paragraph_list:
                if re.search(r'^(food effect|food effects|effect of food|effects of food)\s*(:|—|-|–)', paragraph.lower()):
                    paragraph_list.remove(paragraph)
                    title = re.split(':|—|-|–', paragraph)[0]
                    self.Food_Effect_2 = paragraph[len(title)+1:].strip()
                    self.Food_Effect_1 = ''
            self.Absorption = '\n\n'.join(paragraph_list)
        self.Food_Effect = self.Food_Effect_1 if self.Food_Effect_1 else self.Food_Effect_2
        self.Other = self.pharmacokinetics_non_admef()
        # Dosage paragraphs that mention administration via a feeding tube.
        tube_keywords = ['ng tube', '(ng) tube', 'nasogastric tube', 'gastrostomy tube', 'jejunal tube', 'feeding tube']
        tube_list = []
        for paragraph in self.Dosage_Administration.split('\n\n'):
            if re.search(r'|'.join(tube_keywords), paragraph.lower()):
                tube_list.append(paragraph)
        self.Feeding_Tube = '\n\n'.join(tube_list)
| Yiwen-Shi/drug-labeling-extraction | core/drugsfda.py | drugsfda.py | py | 13,945 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "static.constants.FILE_PATH.DRUGSFDA_ORIGIN_DOCS",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "static.constants.FILE_PATH",
"line_number": 13,
"usage_type": "name"... |
43045351216 | import time
import json
import os
import requests
class pokemon:
    """A caught pokemon: pokedex number, energy, and battle stats.

    Stats are resolved from the pokedex number via the *_from_id helper
    functions defined elsewhere in this module.
    """

    def __init__(self, number, energy):
        self.number = number
        # Bug fix: the energy argument was accepted but ignored (always
        # overwritten with the literal 50); honour the caller's value.
        self.energy = energy
        self.hp = hp_from_id(number)
        self.attack = attack_from_id(number)
        self.defense = defense_from_id(number)
        self.special_attack = special_attack_from_id(number)
        self.special_defense = special_defense_from_id(number)
        self.speed = speed_from_id(number)
class player:
    """A game player: bean currency, owned pokemon and JSON persistence."""

    def __init__(self, name):
        self.beans = 0
        self.name = name
        self.pokemon = []
        self.last_harvest = None  # epoch seconds of the last harvest, or None

    def harvest(self):
        """Collect beans produced since the last harvest.

        First-ever harvest grants 40 beans; afterwards the tree produces one
        bean per 180 seconds, capped at 80 per harvest. A successful harvest
        is persisted to disk immediately.
        """
        if self.last_harvest is None:
            harvested_beans = 40
        else:
            seconds_since_last_harvest = time.time() - self.last_harvest
            harvested_beans = min(int(seconds_since_last_harvest / 180), 80)
        if harvested_beans > 0:
            self.beans += harvested_beans
            self.last_harvest = time.time()
            self.save_to_file()
            print(f"You've harvested {harvested_beans} beans!")
        else:
            print("Oops, the tree hasn't produced any beans yet! Wait a little longer!")

    def print_info(self):
        """Print the player's current bean balance."""
        print(f"You currently have {self.beans} beans!\n \n")

    def print_salutations(self):
        """Return the main-menu prompt string."""
        return(f"Hey, {self.name}! What would you like to do? Check your beans, harvest or (add more later)?\n")

    def save_to_file(self):
        """Write name, beans and last-harvest time to <name>.json."""
        player_data = {
            "name": self.name,
            "beans": self.beans,
            "last_harvest": self.last_harvest
        }
        file_name = f"{self.name}.json"
        # BUG FIX: use a context manager so the file is closed even if
        # json.dump raises (the original closed manually).
        with open(file_name, "w") as file:
            json.dump(player_data, file)

    def load_from_file(self):
        """Restore saved state from <name>.json if it exists."""
        file_name = f"{self.name}.json"
        if os.path.exists(file_name):
            # BUG FIX: json.load(open(...)) leaked the file handle; use a
            # context manager instead.
            with open(file_name) as file:
                player_data = json.load(file)
            self.name = player_data["name"]
            self.beans = player_data["beans"]
            self.last_harvest = player_data["last_harvest"]
        else:
            print("no player file found")
# Fetch Charizard (id 6) from the PokeAPI and map stat name -> base value.
r = requests.get("https://pokeapi.co/api/v2/pokemon/6")
data = r.json()
# BUG FIX: `base_stat` was an undefined bare name (NameError); it is a JSON
# key of each stat entry and must be a string.
stats = {x["stat"]["name"]: x["base_stat"] for x in data["stats"]}
# --- interactive entry point ---------------------------------------------
p1 = player(input("Hey!Whats your name?\n"))
p1.load_from_file()  # restore saved progress if a <name>.json file exists

x = 1  # NOTE(review): this flag is never cleared, so the menu loops forever
while x == 1:
    player_choice = input(p1.print_salutations())
    if player_choice == "beans":
        p1.print_info()
        input("Press Enter to go back to the main menu.")
    elif player_choice == "harvest":
        p1.harvest()
        input("Press Enter to go back to the main menu.")
| Hayden987/PokEggHunt | main.py | main.py | py | 2,832 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 66,... |
7005637816 | """Deleted_columns_in_city
Revision ID: 7b8f459a61b0
Revises: e280451841bb
Create Date: 2021-11-11 15:08:06.260280
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '7b8f459a61b0'       # this migration's id
down_revision = 'e280451841bb'  # parent migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: drop three columns from the hotel table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('hotel', 'location_link')
    op.drop_column('hotel', 'breakfast_included')
    op.drop_column('hotel', 'transport_from_airport')
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: recreate the dropped hotel columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('hotel', sa.Column('transport_from_airport', mysql.ENUM('not_included', 'bus', 'car'), nullable=True))
    op.add_column('hotel', sa.Column('breakfast_included', mysql.ENUM('not_included', 'included', 'paid', 'all_inclusive'), nullable=True))
    op.add_column('hotel', sa.Column('location_link', mysql.VARCHAR(length=500), nullable=True))
    # ### end Alembic commands ###
| nazarkohut/room_book | migrations/versions/7b8f459a61b0_deleted_columns_in_city.py | 7b8f459a61b0_deleted_columns_in_city.py | py | 1,089 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.drop_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "alembic.op",... |
import sys,os

# Make the project root importable so the relative `dataBase` import below
# resolves regardless of the working directory this script is launched from.
pathRacine=os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..'))
if os.path.dirname(pathRacine) not in sys.path :
    sys.path.insert(0,pathRacine)
from .dataBase import Base
if __name__ == "__main__":
from argparse import ArgumentParser
p=ArgumentParser()
p.add_argument('-p',dest='partiel',action="store_true", default=False,help='export de machine, groupe, ratio Maille et Perf uniquement')
p.add_argument('-d',dest='database',default="../myMesh.db",help='nom de la database')
args = p.parse_args()
maBase=Base(args.database)
maBase.initialise()
maBase.exportToCSV(args.partiel)
maBase.close()
| luzpaz/occ-smesh | src/Tools/Verima/Base/exportToCSV.py | exportToCSV.py | py | 694 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_nu... |
72595178664 | # ! ! !
# ESSA points no working
# ! ! !
import re
import os
import sys
import Cython
import ctypes
from ctypes import cdll, CDLL
import random
import datetime
import asyncio
import json
from difflib import SequenceMatcher
import nextcord as discord
from nextcord import ActivityType, guild
from nextcord import user
from nextcord import member
from nextcord import Message
from nextcord import channel
from nextcord.ext import commands, tasks
from nextcord.ext.commands import has_permissions
from nextcord.utils import get
# mydll = cdll.LoadLibrary('c:\\Users\\jaroz\\documents\\fap box\\~Python\\~DIscord-Trader\\clear.dll')
# Load the bot configuration once at startup.
with open('c:\\Users\\jaroz\\Documents\\fap box\\~Python\\~DIscord-Trader\\settings.json', 'r') as d:
    # json.load reads directly from the handle (was read() + loads); the
    # redundant d.close() is gone — the `with` block closes the file.
    settings = json.load(d)

powerUser = settings['powerUser']  # user id allowed to run owner-only commands
pref = settings['prefix']          # command prefix
class fg:
    """ANSI escape codes for foreground (text) colours."""
    black = "\u001b[30m"
    red = "\u001b[31m"
    green = "\u001b[32m"
    yellow = "\u001b[33m"
    blue = "\u001b[34m"
    magenta = "\u001b[35m"
    cyan = "\u001b[36m"
    white = "\u001b[37m"

    # BUG FIX: rgb was a plain function without `self`, so it only worked
    # when called on the class by accident; make it an explicit staticmethod
    # so instance calls work too. Class-level calls are unchanged.
    @staticmethod
    def rgb(r, g, b):
        """Return the 24-bit foreground colour escape for (r, g, b)."""
        return f"\u001b[38;2;{r};{g};{b}m"
class bg:
    """ANSI escape codes for background colours."""
    black = "\u001b[40m"
    red = "\u001b[41m"
    green = "\u001b[42m"
    yellow = "\u001b[43m"
    blue = "\u001b[44m"
    magenta = "\u001b[45m"
    cyan = "\u001b[46m"
    white = "\u001b[47m"

    # BUG FIX: same as fg.rgb — plain function without `self`; staticmethod
    # keeps class-level calls working and fixes instance calls.
    @staticmethod
    def rgb(r, g, b):
        """Return the 24-bit background colour escape for (r, g, b)."""
        return f"\u001b[48;2;{r};{g};{b}m"
def consoleStatus(status: str):
    """Clear the terminal and print the bot's current status and prefix."""
    # mydll.clearScreen()
    os.system("cls")  # Windows-only clear; harmless error elsewhere
    print('\n'*5)
    # BUG FIX: the `status` argument was accepted but never printed — the
    # line always showed an empty "Status:".
    print(f'{fg.white}Status: {status}')
    print(f'{fg.white}Prefix: {fg.cyan}{pref}{fg.white}')
# Bot client and shared module state.
client = commands.Bot(command_prefix=pref)
locTime = datetime.datetime.now()  # NOTE(review): captured once at import, never refreshed
usersStatsPath = 'c:\\Users\\jaroz\\Documents\\fap box\\~Python\\~DIscord-Trader\\usersStats.json'
serversNames = []  # populated in on_ready
serversIDs = []    # populated in on_ready
# for guild in client.guilds:
#     serversList.append(guild)
def add(a: int, b: int):
    """Return the sum of a and b."""
    return a + b
def subtrack(a: int, b: int):
    """Return a minus b."""
    return a - b
def percetage(a: int, b: int):
    """Return a modulo b.

    NOTE(review): despite the name, this computes the remainder (a % b),
    not a percentage — confirm the intended semantics with callers.
    """
    return a % b
def multiply(a: int, b: int):
    """Return the product of a and b."""
    return a * b
def similar(a, b):
    """Return a 0..1 similarity ratio between two sequences (difflib)."""
    ratio = SequenceMatcher(None, a, b).ratio()
    return ratio
def Log(logg: str):
    """Append a timestamped entry to logs.txt."""
    # BUG FIX: open() was called as open(name, 'a', 'utf8'), which passes
    # 'utf8' as the positional *buffering* argument and raises TypeError;
    # the encoding must be passed by keyword.
    with open('logs.txt', 'a', encoding='utf8') as logfile:
        logfile.write(f"{locTime}\n{logg}\n \n")
# BUG FIX: the decorator is `tasks.loop` (lowercase factory); `tasks.Loop`
# is the class and cannot be used this way.
@tasks.loop(seconds=60)
async def loop():
    """Every minute, make sure every guild member has a stats record."""
    with open(usersStatsPath, 'r') as j:
        usersStats = json.loads(j.read())
    # BUG FIX: `discord.guilds.members(...)` is not a valid call — iterate
    # the bot's guilds and each guild's cached member list instead.
    for guild in client.guilds:
        for member in guild.members:
            if f'user.{member.id}' not in usersStats:
                usersStats[f'user.{member.id}'] = {}
                usersStats[f'user.{member.id}']['essa'] = 0
                # BUG FIX: the bare `...['warns']` lookup raised KeyError and
                # stored nothing; initialise the counter explicitly.
                usersStats[f'user.{member.id}']['warns'] = 0
    with open(usersStatsPath, 'w') as j:
        # BUG FIX: `indent` belongs to json.dumps, not file.write.
        j.write(json.dumps(usersStats, indent=4))
@client.event
async def on_ready():
    """Startup hook: cache guild names/ids, set presence and announce."""
    # serversList.append(discord.client.guilds)
    for guild in client.guilds:
        serversNames.append(guild.name)
        serversIDs.append(guild.id)
    await client.change_presence(activity=discord.Game(name=settings['activity']))
    consoleStatus(f'{fg.green}Online')
    await client.get_channel(settings['joinLeaveChannel']).send(f"Hello")
@client.event
async def on_command_error(ctx, error: Exception):
    """Report command errors both to the channel and to the console."""
    embed = discord.Embed(title=error, description=f'{error}', color=0x00fbff)
    embed.set_thumbnail(url=ctx.author.avatar_url)
    embed.set_footer(text=datetime.datetime.now())
    await ctx.send(embed=embed)
    # BUG FIX: `fg.RED` does not exist (the attribute is lowercase `red`),
    # which raised a second exception inside the error handler itself.
    print(f"""
    \n
    \n
    exception: {fg.red}
    {error}
    {fg.white}
    \n
    \n
    """)
@client.event
async def on_member_join(member : discord.member):
    """Greet a new member in the join/leave channel and log the join."""
    await client.get_channel(settings['joinLeaveChannel']).send(f"Hello <@{member.id}>")
    Log(f'@<{member}> przybył na serwer')
@client.event
async def on_member_remove(member: discord.member):
    """Announce that a member left the server."""
    # BUG FIX: this gateway event receives only the member (the extra `ctx`
    # parameter broke the dispatcher's single-argument call), and the method
    # is `get_channel`, not `get_channell`.
    await client.get_channel(settings['joinLeaveChannel']).send(f"{member} spierdolił z serwera")
@client.event
async def on_message_delete(message: discord.message):
    """Log the content of deleted messages (except the bot's own)."""
    # BUG FIX: this event receives only the message object; the extra `ctx`
    # parameter made the dispatcher's single-argument call fail.
    if message.author.id != 921832817854398505:
        Log(f"{message.author} usunoł wiadomość: \n \n '{message.content}'\n")
@client.event
async def on_message(message : discord.message):
    """Fuzzy-match incoming messages against trigger phrases and reply."""
    if similar('Jebac Jara', message.content) > 0.5:
        await message.channel.send('Nie jara, ciebie jebać')
    if similar('Trader', message.content) > 0.7 and message.author.id != 921832817854398505:
        await message.channel.send('Trader to bug')
    # NOTE(review): `message.author.id` alone is always truthy — the sibling
    # branches compare against the bot's id; this one probably should too.
    if similar('mati', message.content) > 0.7 and message.author.id:
        await message.channel.send("https://media.discordapp.net/attachments/791411982380695592/927649861321519104/Screenshots_2022-01-01-23-31-08.png")
    if similar('zelson', message.content) > 0.7 and message.author.id != 921832817854398505:
        await message.channel.send('E')
        await asyncio.sleep(1)
        await message.channel.send('XD')
        await asyncio.sleep(1)
        await message.channel.send('Kto pytał')
    # Without this call the command decorators below would never fire.
    await client.process_commands(message)
@has_permissions(ban_members=True)
@client.command()
async def ban(ctx, member : discord.member, reason=None):
    """Ban *member*, log the action and confirm with an embed."""
    await member.ban()
    if reason is None:
        reason = 'No provided'
    Log(f"user {member.id} has banned from {ctx.author.id} ")
    embed=discord.Embed(title="Banned", description="user", color=0x00fbff)
    embed.add_field(name="reason", value=f"{reason}", inline=False)
    embed.set_thumbnail(url=member.avatar_url)
    await ctx.send(embed=embed)
@has_permissions(kick_members=True)
@client.command()
async def kick(ctx, member : discord.Member, reason=None):
    """Kick *member*, log the action and confirm with an embed."""
    await member.kick()
    if reason is None:
        reason = 'No provided'
    Log(f"user {member.id} has kicked from {ctx.author.id} ")
    embed=discord.Embed(title="Kicked", description="user", color=0x00fbff)
    embed.add_field(name="reason", value=f"{reason}", inline=False)
    await ctx.send(embed=embed)
@has_permissions(mute_members=True)
@client.command()
async def mute(ctx, member: discord.Member, reason=None):
    """Give *member* the configured mute role."""
    muteRole = discord.utils.get(ctx.message.guild.roles, id=settings['muteRole'])
    if reason is None:
        reason = 'No provided'
    # BUG FIX: the Bot object has no `add_roles`; roles are added on the
    # Member object itself.
    await member.add_roles(muteRole)
@client.command()
async def log(ctx, loggg: str):
    """Append a user-supplied entry (max 69 chars) to the log file."""
    # BUG FIX: the original sent an empty message (a TypeError) on overlong
    # input, then wrote the log anyway, and passed the embeds positionally
    # (`send(embed)` posts the repr instead of the embed).
    if len(loggg) > 69:
        embed = discord.Embed(title="błąd", description="Maksymalna długość zmiennej string loggg to 69 znaków", color=0x00fbff)
        await ctx.channel.send(embed=embed)
        return
    Log(loggg)
    embed = discord.Embed(title="Udana akcja", description="Log pomyślnie wprowadzony do dziennika", color=0x00fbff)
    await ctx.channel.send(embed=embed)
@client.command()
async def end(ctx):
    """Owner-only: announce shutdown and terminate the process."""
    if ctx.author.id == powerUser:
        consoleStatus(f'{fg.red}Offline')
        await ctx.channel.send("baj")
        sys.exit(0)
    else:
        await ctx.channel.send('Missing permisions !')
@client.command()
async def rename(ctx, name: str):
    """Owner-only: change the bot account's username."""
    if ctx.author.id == powerUser:
        await client.user.edit(username=name)
    else:
        # BUG FIX: the coroutine was created but never awaited, so the
        # permission-denied reply was never actually sent.
        await ctx.channel.send('Missing permisions !')
@client.command()
async def retag(ctx):
    """Reset the bot's username (intended to force a new discriminator)."""
    botTag = client.get_user(921832817854398505).discriminator
    # NOTE(review): this sets the username to the discriminator string and
    # then back to 'Trader'; confirm that is the intended re-tag mechanism.
    await client.user.edit(username=botTag)
    await client.user.edit(username='Trader')
@client.command()
async def restatus(ctx, typeActicity: discord.activity, status: str):
    """Owner-only: change the bot's presence text."""
    if ctx.author.id == powerUser:
        # BUG FIX: ActivityType is an enum and cannot be constructed from a
        # name keyword; build an Activity of the requested type instead.
        await client.change_presence(activity=discord.Activity(type=typeActicity, name=status))
    else:
        await ctx.channel.send('Missing permisions !')
@client.command()
async def prefix(ctx):
    """Tell the caller which command prefix the bot listens to."""
    reply = f"My prefix is {pref}"
    await ctx.channel.send(reply)
@client.command()
async def av(ctx, *, member: discord.Member = None):
    """Post the avatar of *member* (defaults to the command author)."""
    if member is None:
        # BUG FIX: `member == ctx.author` compared instead of assigning, so
        # `member` stayed None and the attribute access below crashed.
        member = ctx.author
    avatar = member.avatar
    await ctx.channel.send(avatar)
@client.command()
async def serversL(ctx):
    """Send the name of every server the bot is in, one message each."""
    for server_name in serversNames:
        await ctx.send(server_name)
@client.command()
async def source(ctx):
    """Post the bot's own source file as a code block."""
    # BUG FIX: the file handle was opened but never closed; use a context
    # manager so it is released deterministically.
    with open('c:\\Users\\jaroz\\Documents\\fap box\\~Python\\~DIscord-Trader\\bot.py', 'r') as src_file:
        src = src_file.read()
    await ctx.channel.send(f"```py\n{src}\n```")
@client.command()
async def plus(ctx, member: discord.Member, amount: int):
    """Transfer *amount* essa points from the author to *member*.

    Reads essaPoints.json, makes sure both users have an entry, checks the
    sender's balance, performs the transfer and writes the file back.
    """
    path = 'c:\\Users\\jaroz\\Documents\\fap box\\~Python\\~DIscord-Trader\\essaPoints.json'
    # BUG FIX: the file was opened in append mode ('a'), which cannot be
    # read; the dict was "appended" to like a list; the updated balances
    # were never stored back into the mapping or the file; and the lookup
    # keys contained literal quote characters.
    with open(path, 'r') as j:
        essa = json.loads(j.read())
    for user_id in (str(ctx.author.id), str(member.id)):
        essa.setdefault(user_id, 0)
    if essa[str(ctx.author.id)] < amount and ctx.author.id != powerUser:
        embed = discord.Embed(title=f'error - {datetime.datetime.now()}', description='Twoja essa jest za mała', color=0x00fbff)
        embed.set_thumbnail(url=ctx.author.avatar_url)
        await ctx.send(embed=embed)
    else:
        essa[str(ctx.author.id)] -= amount
        essa[str(member.id)] += amount
        with open(path, 'w') as j:
            j.write(json.dumps(essa, indent=4))
@client.command()
async def send(ctx, msg : str):
    """Repost *msg* as the bot, deleting the invoking message first."""
    await ctx.message.delete()
    await ctx.channel.send(f"{msg}")
    Log(f"\n <@{ctx.author.id}> używając {settings['prefix']} wyslal wiadomość {msg}")
@client.command()
async def similarity(ctx, a: str, b: str):
    """Reply with the similarity ratio between two strings."""
    # BUG FIX: the command called itself (infinite recursion) instead of the
    # module-level `similar` helper.
    await ctx.channel.send(f'{float(similar(a, b))}%')
@client.command()
async def anal(ctx, member : discord.Member):
    """Joke command: posts two scripted messages 5 seconds apart."""
    await ctx.channel.send(f"*<@{ctx.author.id}> zaczol zapinać <@{member.id}> prosto w jego dupalce :hot_face:*")
    await asyncio.sleep(5)
    await ctx.channel.send(f"*<@{ctx.author.id}> spuscił się <@{member.id}> prosto w głąb jego mokrego odbytu :weary:*")
@client.command()
async def roll(ctx, min: int, max: int):
    """Send a random integer in [min, max]; both bounds are clamped to ±6900."""
    if not -6900 <= max <= 6900:
        max = 6900
    if not -6900 <= min <= 6900:
        min = -6900
    await ctx.channel.send(random.randint(min, max))
@client.command()
async def choice(ctx, choice0: str, choice1: str, choice2=None, choice3=None, choice4=None):
    """Pick one of up to five options at random and send it."""
    # Collect the provided options; trailing ones default to None and are
    # skipped (replaces five copy-pasted if-blocks).
    choices = [c for c in (choice0, choice1, choice2, choice3, choice4) if c is not None]
    await ctx.channel.send(random.choice(choices))
@client.command()
async def meme(ctx):
    """Send a random meme image URL, skipping 'x' placeholder slots."""
    memes = [
        'https://cdn.discordapp.com/attachments/753683283182747759/939476919739621407/FB_IMG_1643793609067.jpg',  # 0
        'https://cdn.discordapp.com/attachments/753683283182747759/939476920108728330/FB_IMG_1643793642945.jpg',  # 1
        'https://cdn.discordapp.com/attachments/753683283182747759/939476920360370206/FB_IMG_1643793770997.jpg',  # 2
        'https://cdn.discordapp.com/attachments/753683283182747759/939478970410356766/Untitled18_20220204165110.png',  # 3
        'x',  # 4
        'x',  # 5
        'x',  # 6
        'x',  # 7
        'x',  # 8
        'x',  # 9
        'x',  # 10
        'x',  # 11
        'x',  # 12
        'x',  # 13
        'x',  # 14
        'x',  # 15
        'x',  # 16
        'x',  # 17
        'x',  # 18
        'x'  # 19
    ]
    memeChoice = random.choice(memes)
    while memeChoice == 'x':
        memeChoice = random.choice(memes)
    # BUG FIX: the loop above re-rolled until a real entry was found, but the
    # send drew a *fresh* random element, so 'x' placeholders could still be
    # posted. Send the validated choice instead.
    await ctx.channel.send(memeChoice)
    # await client.say(ctx.message.channel, f"{random.choice(memes)}")
@client.command()
async def film(ctx):
    """Send a random video clip URL from a hard-coded list."""
    films = [
        'https://cdn.discordapp.com/attachments/296056831514509312/773404299609767946/video0-58.mp4',
        'https://cdn.discordapp.com/attachments/296056831514509312/773404299609767946/video0-58.mp4',
        'https://cdn.discordapp.com/attachments/773151424724729886/806854489742245888/video0_2.mp4',
        'https://cdn.discordapp.com/attachments/773151424724729886/808636325934792744/7b2c1bf81d9016431aa46ea9a5c3a130.mp4',
        'https://cdn.discordapp.com/attachments/796881224718221323/808303823974039602/redditsave.com-weird_flex_but_ok-edzlanht70e61.mp4',
        'https://cdn.discordapp.com/attachments/796881224718221323/808318518835413003/Teams.mp4',
        'https://cdn.discordapp.com/attachments/792792798377279509/808089281435926548/video0.mp4',
        'https://cdn.discordapp.com/attachments/621057340963160115/809166647108960257/video-1612904500.mp4',
        'https://cdn.discordapp.com/attachments/755771736561287209/809730636801376256/video0-67.mp4',
        'https://cdn.discordapp.com/attachments/755771736561287209/809731909801476116/video0_1_2.mp4',
        'https://cdn.discordapp.com/attachments/773151424724729886/809765492671774760/video-1613040259.mp4',
        'https://cdn.discordapp.com/attachments/773151424724729886/810244464396206090/Top_10_niebezpieczne_gangi-2.mp4',
        'https://cdn.discordapp.com/attachments/773151424724729886/811210304452165672/xD.mp4',
        'https://cdn.discordapp.com/attachments/773151424724729886/812319349166833664/video0_6.mp4',
        'https://cdn.discordapp.com/attachments/773151424724729886/812628207530278922/video0-15_1.mp4',
        'https://cdn.discordapp.com/attachments/773151424724729886/812825229746765844/2e0bb6c0-a98d-4f22-83c5-abba8a93000c.mp4',
        'https://cdn.discordapp.com/attachments/773151424724729886/814106087665172540/true.mp4',
        'https://cdn.discordapp.com/attachments/676232229189451787/816653608975204382/video0.mp4',
        'https://cdn.discordapp.com/attachments/676232229189451787/816725693017751634/video0a.mp4'
    ]
    await ctx.channel.send(random.choice(films))
client.run(settings["token"])
| Czuowuek-SOS/Bot | Main.py | Main.py | py | 14,480 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "nextcord.ext.commands.Bot",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "nextcord.ext.commands"... |
10256190705 | import threading
import time
import pygame
import random
import socket
import pickle
"""
10 x 20 square grid
shapes: S, Z, I, O, J, L, T
represented in order by 0 - 6
"""
pygame.font.init()

# GLOBALS VARS
s_width = 1200      # window width in pixels
s_height = 750      # window height in pixels
play_width = 300  # meaning 300 // 10 = 30 width per block
play_height = 600  # meaning 600 // 20 = 30 height per block
block_size = 30   # pixel size of a single grid cell

top_left_y = s_height - play_height - 30  # y of the playfield's top edge

opponent_grid = None  # latest grid received from the other player (multiplayer)
high_score = 0        # best score as reported by the server

host = "127.0.0.1"  # localhost
port = 55555        # game server port
# SHAPE FORMATS
# SHAPE FORMATS
# Each shape is a list of rotation states; each state is five 5-character
# rows where '0' marks an occupied cell and '.' an empty one.
# CONSISTENCY FIX: the first rotation of S contained 6-character rows
# (behaviourally harmless, since column 5 was never occupied, but
# inconsistent with every other 5-wide template); normalised to 5 chars.

S = [['.....',
      '.....',
      '..00.',
      '.00..',
      '.....'],
     ['.....',
      '..0..',
      '..00.',
      '...0.',
      '.....']]

Z = [['.....',
      '.....',
      '.00..',
      '..00.',
      '.....'],
     ['.....',
      '..0..',
      '.00..',
      '.0...',
      '.....']]

I = [['..0..',
      '..0..',
      '..0..',
      '..0..',
      '.....'],
     ['.....',
      '0000.',
      '.....',
      '.....',
      '.....']]

O = [['.....',
      '.....',
      '.00..',
      '.00..',
      '.....']]

J = [['.....',
      '.0...',
      '.000.',
      '.....',
      '.....'],
     ['.....',
      '..00.',
      '..0..',
      '..0..',
      '.....'],
     ['.....',
      '.....',
      '.000.',
      '...0.',
      '.....'],
     ['.....',
      '..0..',
      '..0..',
      '.00..',
      '.....']]

L = [['.....',
      '...0.',
      '.000.',
      '.....',
      '.....'],
     ['.....',
      '..0..',
      '..0..',
      '..00.',
      '.....'],
     ['.....',
      '.....',
      '.000.',
      '.0...',
      '.....'],
     ['.....',
      '.00..',
      '..0..',
      '..0..',
      '.....']]

T = [['.....',
      '..0..',
      '.000.',
      '.....',
      '.....'],
     ['.....',
      '..0..',
      '..00.',
      '..0..',
      '.....'],
     ['.....',
      '.....',
      '.000.',
      '..0..',
      '.....'],
     ['.....',
      '..0..',
      '.00..',
      '..0..',
      '.....']]

shapes = [S, Z, I, O, J, L, T]
# Colour of each shape, parallel to `shapes`.
shape_colors = [(0, 255, 0), (255, 0, 0), (0, 255, 255), (255, 255, 0), (255, 165, 0), (0, 0, 255), (128, 0, 128)]
# index 0 - 6 represent shape
class Piece(object):
    """A falling tetromino: template position, shape data and rotation."""

    def __init__(self, x, y, shape):
        self.x = x          # template column offset on the grid
        self.y = y          # template row offset on the grid
        self.shape = shape  # one of the templates in `shapes`
        self.color = shape_colors[shapes.index(shape)]
        self.rotation = 0   # index into the shape's rotation list
def create_grid(locked_positions=None):
    """Return a 20x10 grid of RGB colours.

    Cells default to black (0, 0, 0); any (x, y) key present in
    *locked_positions* is filled with its stored colour.
    """
    # BUG FIX: a mutable default argument ({}) is shared between calls; use
    # None as the default and create a fresh dict instead.
    if locked_positions is None:
        locked_positions = {}
    return [
        [locked_positions.get((x, y), (0, 0, 0)) for x in range(10)]
        for y in range(20)
    ]
def convert_shape_format(shape):
    """Return the absolute (x, y) grid cells occupied by *shape*.

    Reads the template rows for the current rotation, collects every '0'
    cell, then shifts the result by (-2, -4) so the template padding lines
    up with the piece's anchor position.
    """
    template = shape.shape[shape.rotation % len(shape.shape)]
    cells = [
        (shape.x + col, shape.y + row)
        for row, line in enumerate(template)
        for col, ch in enumerate(line)
        if ch == "0"
    ]
    return [(x - 2, y - 4) for x, y in cells]
def valid_space(shape, grid):
    """True if every cell of *shape* lands on an empty grid cell.

    Cells above the visible grid (y <= -1) are always allowed so freshly
    spawned pieces can start off-screen.
    """
    empty_cells = {
        (x, y)
        for y in range(20)
        for x in range(10)
        if grid[y][x] == (0, 0, 0)
    }
    for x, y in convert_shape_format(shape):
        if (x, y) not in empty_cells and y > -1:
            return False
    return True
def check_lost(positions):
    """True if any locked block sits above the top of the playfield (y < 1)."""
    return any(y < 1 for _, y in positions)
def get_shape():
    """Return a new Piece of a random shape, anchored at column 5, row 0."""
    return Piece(5, 0, random.choice(shapes))
def draw_text_middle(surface, text, size, color):
    """Render *text* centred in the window."""
    font = pygame.font.SysFont("comicsans", size, bold=True)
    label = font.render(text, 1, color)

    surface.blit(label, (s_width // 2 - label.get_width() / 2,
                         s_height // 2 - label.get_height() / 2))
def draw_grid(surface, grid, top_left_x):
    """Draw the grey cell border lines over a playfield at *top_left_x*."""
    sx = top_left_x
    sy = top_left_y
    for i in range(len(grid)):
        # horizontal line for each row
        pygame.draw.line(surface, (128, 128, 128), (sx, sy + i * block_size), (sx + play_width, sy + i * block_size))
        for j in range(len(grid[i])):
            # vertical line for each column
            pygame.draw.line(surface, (128, 128, 128), (sx + j * block_size, sy), (sx + j * block_size, sy + play_height))
def clear_rows(grid, locked_positions):
    """Remove every full row, shift the blocks above down, and return the
    number of rows cleared.
    """
    # Find full rows (no black cell) and drop their blocks from the lock map.
    cleared_rows = []
    for y in range(len(grid) - 1, -1, -1):
        if (0, 0, 0) not in grid[y]:
            cleared_rows.append(y)
            for x in range(len(grid[y])):
                locked_positions.pop((x, y), None)
    if cleared_rows:
        # BUG FIX: the original shifted every block above the *highest*
        # cleared row by the total count, which misplaces blocks that sit
        # between two non-adjacent cleared rows. Shift each surviving block
        # by the number of cleared rows strictly below it instead.
        for key in sorted(list(locked_positions), key=lambda pos: pos[1])[::-1]:
            x, y = key
            shift = sum(1 for row in cleared_rows if row > y)
            if shift:
                locked_positions[(x, y + shift)] = locked_positions.pop(key)
    return len(cleared_rows)
def draw_next_shape(shape, surface, top_left_x, multiplayer):
    """Draw the upcoming piece preview beside the playfield."""
    font = pygame.font.SysFont("comicsans", 30)
    label = font.render("Next Shape", 1, (255, 255, 255))

    # In multiplayer the preview moves right to clear the second playfield.
    if multiplayer:
        sx = top_left_x + play_width * 2 + 100
    else:
        sx = top_left_x + play_width + 50
    sy = top_left_y + play_height / 2 - 120
    format = shape.shape[shape.rotation % len(shape.shape)]

    for i, line in enumerate(format):
        row = list(line)
        for j, column in enumerate(row):
            if column == "0":
                pygame.draw.rect(surface, shape.color, (sx + j * 30 + 15, sy + i * 30 + 25, block_size, block_size))

    surface.blit(label, (sx + 10, sy - 30))
def draw_window(surface, player_grid, multi_player, top_left_x, opp_top_left_x, score=0, high_score=0):
    """Redraw the whole frame: labels, scores, both boards and their borders."""
    surface.fill((0, 0, 0))

    pygame.font.init()
    font = pygame.font.SysFont("comicsans", 40)
    player_label = font.render("You", True, (255, 255, 255))  # Can further change to player's nickname
    opp_label = font.render("Opponent", True, (255, 255, 255))  # Can further change to opponent's nickname

    surface.blit(player_label, (top_left_x + play_width / 2 - (player_label.get_width() / 2), 30))  # middle of the screen
    if multi_player:
        surface.blit(opp_label, (opp_top_left_x + play_width / 2 - (opp_label.get_width() / 2), 30))  # middle of the screen

    # print current score
    font = pygame.font.SysFont("comicsans", 30)
    label = font.render("Score: {0}".format(score), True, (255, 255, 255))
    if multi_player:
        sx = top_left_x + play_width * 2 + 100
    else:
        sx = top_left_x + play_width + 50
    sy = top_left_y + play_height / 2 - 100
    surface.blit(label, (sx + 20, sy + 160))

    # print last score
    label = font.render("High score: {0}".format(high_score), True, (255, 255, 255))
    sx = top_left_x - 250
    sy = top_left_y + 100
    surface.blit(label, (sx + 10, sy + 160))

    # Fill each cell; in multiplayer also draw the opponent's board.
    # NOTE(review): reads the module-level `opponent_grid`; main() assigns it
    # before the first frame is drawn.
    for i in range(len(player_grid)):
        for j in range(len(player_grid[i])):
            pygame.draw.rect(surface, player_grid[i][j], (top_left_x + j * block_size, top_left_y + i * block_size, block_size, block_size), 0)
            if multi_player:
                pygame.draw.rect(surface, opponent_grid[i][j], (opp_top_left_x + j * block_size, top_left_y + i * block_size, block_size, block_size), 0)

    # Red border around each playfield, then the grey cell grid lines.
    pygame.draw.rect(surface, (255, 0, 0), (top_left_x, top_left_y, play_width + 1, play_height), 4)
    if multi_player:
        pygame.draw.rect(surface, (255, 0, 0), (opp_top_left_x, top_left_y, play_width + 1, play_height), 4)

    draw_grid(surface, player_grid, top_left_x)
    if multi_player:
        draw_grid(surface, opponent_grid, opp_top_left_x)
def main(win, multi_player, client):
    # def main(win, multi_player):
    """Run one game: input handling, gravity, scoring and (optionally)
    exchanging grids with the opponent over *client*."""
    # Single-player centres the board; multiplayer puts it left of centre.
    if multi_player:
        top_left_x = s_width // 2 - play_width - 30
    else:
        top_left_x = (s_width - play_width) // 2
    opp_top_left_x = s_width // 2 - play_width + 300

    locked_positions = {}  # (x, y) -> colour of settled blocks

    change_piece = False
    run = True
    current_piece = get_shape()
    next_piece = get_shape()
    clock = pygame.time.Clock()
    fall_time = 0       # ms since the piece last fell one row
    fall_speed = 0.27   # seconds per row of gravity
    level_time = 0      # ms since the last speed-up
    score = 0

    global opponent_grid
    opponent_grid = create_grid(locked_positions)

    while run:
        player_grid = create_grid(locked_positions)
        fall_time += clock.get_rawtime()  # get time since last clock.tick()
        level_time += clock.get_rawtime()
        clock.tick()

        # Speed gravity up every 5 seconds, down to a floor of 0.12 s/row.
        if level_time / 1000 > 5:
            level_time = 0
            if fall_speed > 0.12:  # max speed = 0.27 - 0.12 = 0.15
                fall_speed -= 0.005

        if fall_time / 1000 > fall_speed:
            fall_time = 0
            current_piece.y += 1
            if not (valid_space(current_piece, player_grid)) and current_piece.y > 0:  # piece is not at the top
                current_piece.y -= 1
                change_piece = True  # lock the piece and generate another piece

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
            if event.type == pygame.KEYDOWN:
                # Each move is tried, then undone if it leaves the board or
                # overlaps settled blocks.
                if event.key == pygame.K_LEFT:
                    current_piece.x -= 1
                    if not (valid_space(current_piece, player_grid)):
                        current_piece.x += 1
                if event.key == pygame.K_RIGHT:
                    current_piece.x += 1
                    if not (valid_space(current_piece, player_grid)):
                        current_piece.x -= 1
                if event.key == pygame.K_DOWN:
                    current_piece.y += 1
                    if not (valid_space(current_piece, player_grid)):
                        current_piece.y -= 1
                if event.key == pygame.K_UP:
                    current_piece.rotation += 1
                    if not (valid_space(current_piece, player_grid)):
                        current_piece.rotation -= 1

        shape_positions = convert_shape_format(current_piece)

        # Paint the falling piece onto this frame's copy of the grid.
        for i in range(len(shape_positions)):
            x, y = shape_positions[i]
            if y > -1:
                player_grid[y][x] = current_piece.color

        if change_piece:
            for position in shape_positions:
                pos = (position[0], position[1])
                locked_positions[pos] = current_piece.color  # append frozen pieces to locked positions dict
            current_piece = next_piece
            next_piece = get_shape()
            change_piece = False
            score += clear_rows(player_grid, locked_positions) * 10

        draw_window(win, player_grid, multi_player, top_left_x, opp_top_left_x, score, high_score)
        draw_next_shape(next_piece, win, top_left_x, multi_player)

        if multi_player:
            # Exchange grids with the server; "Lost" means the opponent died.
            client.send(pickle.dumps(player_grid))
            data = pickle.loads(client.recv(16384))
            if data == "Lost":
                draw_text_middle(win, "YOU WIN!", 80, (255, 255, 255))
                pygame.display.update()
                pygame.time.delay(1500)
                run = False
                client.send(pickle.dumps("Score,{0}".format(score)))
            else:
                opponent_grid = data

        pygame.display.update()

        if check_lost(locked_positions):
            draw_text_middle(win, "YOU LOST!", 80, (255, 255, 255))
            # NOTE(review): this sends on `client` even when multi_player is
            # False — verify a client is always supplied, or guard this.
            client.send(pickle.dumps("Lost"))
            pygame.display.update()
            pygame.time.delay(1500)
            run = False
            client.send(pickle.dumps("Score,{0}".format(score)))
def ready(client):
    """Block until the server announces game start, then run the game.

    NOTE(review): relies on the module-level `win`; this helper appears
    unused — waiting_room() duplicates the same handshake.
    """
    run = True
    while run:
        message = pickle.loads(client.recv(1024))
        if message == "Game start":
            print(message)
            run = False
    main(win, True, client)
    pygame.display.quit()
def waiting_room(win):
    """Connect to the server, wait for an opponent, then start the match."""
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))

    message = client.recv(1024).decode()
    if message == "Connected to the server":
        print(message)
        win.fill((0, 0, 0))
        draw_text_middle(win, "Please wait for another player", 60, (255, 255, 255))
        pygame.display.update()
        # Poll until the server pairs us; the start message carries the
        # current high score after a comma ("Game start,<score>").
        while True:
            message = client.recv(1024).decode()
            if "Game start" in message:
                print("Game start")
                global high_score
                high_score = int(message.split(',')[1].strip())
                break
        main(win, True, client)
        pygame.display.quit()
    else:
        print("Connection error")
        client.close()
# Create the window and enter the lobby (blocks until the game ends).
win = pygame.display.set_mode((s_width, s_height))
pygame.display.set_caption("Tetris")

waiting_room(win)  # start game
| lewis0926/online-tetris-game | client.py | client.py | py | 13,057 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.font.init",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "pygame.font.SysFont"... |
19830142972 | from datetime import datetime
from typing import List, Optional, Tuple
from api.models.booking_model import Booking
from api.models.room_model import Room
from api.models.user_model import User
from api.repository.rooms_repository import RoomsRepository
from api.repository.user_repository import UserRepository
from .abstract_repository import AbstractRepository
class BookingRepository(AbstractRepository):
""""""
HEADERS = ["id", "note", "room_id", "user_id", "time_from", "time_to"]
def create_table(self) -> None:
""""""
query = """
CREATE TABLE IF NOT EXISTS bookings (
id INTEGER PRIMARY KEY AUTOINCREMENT,
note TEXT,
room_id INTEGER NOT NULL,
user_id INTEGER NOT NULL,
time_from timestamp NOT NULL,
time_to timestamp NOT NULL,
FOREIGN KEY (room_id)
REFERENCES rooms (id),
FOREIGN KEY (user_id)
REFERENCES bookings (id)
)
"""
with self.open_cursor() as cursor:
cursor.execute(query)
def add(self, booking: Booking) -> None:
"""
Add a room booking
"""
query = """
INSERT INTO bookings (
note,
room_id,
user_id,
time_from,
time_to
) VALUES (?, ?, ?, ?, ?)"""
with self.open_cursor() as cursor:
cursor.execute(query, (booking.note, booking.room_id, booking.user_id,
booking.time_from, booking.time_to))
booking.id = cursor.lastrowid
def get_by_id(self, booking_id: str) -> Optional[Booking]:
""""""
query = """
SELECT * FROM bookings
INNER JOIN users ON users.id = bookings.user_id
INNER JOIN rooms ON rooms.id = bookings.room_id
WHERE bookings.id = ?
"""
with self.open_cursor() as cursor:
cursor.execute(query, (booking_id,))
result = cursor.fetchone()
return self._tuple_to_booking(result) if result else None
def get_all_by_room_id(self, room_id: str) -> List[Booking]:
"""
Gets all bookings by a room id
"""
query = """
SELECT * FROM bookings
INNER JOIN users ON users.id = bookings.user_id
INNER JOIN rooms ON rooms.id = bookings.room_id
WHERE room_id = ?
"""
with self.open_cursor() as cursor:
cursor.execute(query, (room_id,))
results = cursor.fetchall()
return [self._tuple_to_booking(result) for result in results]
def get_all_by_time(self, time_from: datetime, time_to: datetime, room_id: Optional[str] = None) -> List[Booking]:
"""
Gets all bookings within time frame
Optionally can use a room_id to also select by room id
"""
if room_id is None:
query = """
SELECT * FROM bookings
INNER JOIN users ON users.id = bookings.user_id
INNER JOIN rooms ON rooms.id = bookings.room_id
WHERE time_from BETWEEN ? AND ?
"""
params = (time_from, time_to)
else:
query = """
SELECT * FROM bookings
INNER JOIN users ON users.id = bookings.user_id
INNER JOIN rooms ON rooms.id = bookings.room_id
WHERE room_id = ? AND time_from BETWEEN ? AND ?
"""
params = (room_id, time_from, time_to)
with self.open_cursor() as cursor:
cursor.execute(query, params)
results = cursor.fetchall()
return [self._tuple_to_booking(result) for result in results]
def get_all_by_user_id(self, user_id: str) -> List[Booking]:
""""""
query = """
SELECT * FROM bookings
INNER JOIN users ON users.id = bookings.user_id
INNER JOIN rooms ON rooms.id = bookings.room_id
WHERE user_id = ?
"""
with self.open_cursor() as cursor:
cursor.execute(query, (user_id,))
results = cursor.fetchall()
return [self._tuple_to_booking(result) for result in results]
def delete(self, booking: Booking) -> None:
"""
"""
query = "DELETE FROM bookings WHERE id = ?"
with self.open_cursor() as cursor:
cursor.execute(query, (booking.id,))
def delete_all_by_user_id(self, user_id: str) -> None:
""""""
query = "DELETE FROM bookings WHERE user_id = ?"
with self.open_cursor() as cursor:
cursor.execute(query, (user_id,))
def _tuple_to_booking(self, data: Tuple) -> Booking:
    """Build a Booking (with nested User and Room) from one joined result row.

    Column layout assumed from the `SELECT *` triple join: data[0:6] are the
    booking columns, data[6:11] the user columns, data[11:] the room columns —
    TODO confirm these slices still match if any table schema changes.
    """
    booking_data = dict(zip(self.HEADERS, data[:6]))
    user_data = dict(zip(UserRepository.HEADERS, data[6:11]))
    room_data = dict(zip(RoomsRepository.HEADERS, data[11:]))
    # Time columns come back as ISO-8601 strings; convert to datetime objects.
    time_from_timestamp = booking_data.get("time_from")
    time_to_timestamp = booking_data.get("time_to")
    booking_data["time_from"] = datetime.fromisoformat(time_from_timestamp)
    booking_data["time_to"] = datetime.fromisoformat(time_to_timestamp)
    return Booking(**booking_data, user=User(**user_data), room=Room(**room_data))
| Tomdango/Q4-2021-SoftEngDevOps | api/repository/booking_repository.py | booking_repository.py | py | 5,261 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "abstract_repository.AbstractRepository",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "api.models.booking_model.Booking",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 57,
"usage_type": "name"
},
... |
71570408423 | import time
import numpy as np
import math
from numba import cuda
from matrix_operations import matmul_packed32_shared, matmul_packed32, TPB
from matrix_convert import to_gpu, from_gpu, to_type, from_type
np.random.seed(1)
def run_uint32(size=10000, times=50, sparsity=0.7):
    """Benchmark packed uint32 boolean matrix multiplication with and without
    shared memory, printing the average per-launch wall time.

    :param size: edge length of the square operand matrices.
    :param times: number of kernel launches to average over.
    :param sparsity: approximate fraction of False entries in the operands.
    """
    # Draw two independent random boolean matrices. The original unpacked a
    # single (size, size) array into A, B, which raises for any size != 2.
    A = np.random.randint(0, 100, (size,) * 2) > (sparsity * 100)
    B = np.random.randint(0, 100, (size,) * 2) > (sparsity * 100)
    C = np.zeros_like(A)
    matrices = {'A': A, 'B': B, 'C': C}
    matrices = to_type(matrices, 'uint32')
    matrices = to_gpu(matrices)
    # Launch the kernels on the device copies; the original passed the host
    # arrays, so the to_gpu conversion had no effect.
    # NOTE(review): assumes to_gpu preserves the 'A'/'B'/'C' keys — confirm in matrix_convert.
    d_A, d_B, d_C = matrices['A'], matrices['B'], matrices['C']
    is_changed = cuda.device_array((1,), dtype=bool)
    blockspergrid = tuple(int(math.ceil(A.shape[i] / TPB[i])) for i in (0, 1))
    print('Begin multuplying without shared memory')
    begin_no_shared = time.time()
    for i in range(times):
        matmul_packed32[blockspergrid, TPB](d_A, d_B, d_C, is_changed)
    end_no_shared = time.time()
    print('Begin multuplying with shared memory')
    begin_shared = time.time()
    for i in range(times):
        matmul_packed32_shared[blockspergrid, TPB](d_A, d_B, d_C, is_changed)
    end_shared = time.time()
    matrices = from_gpu(matrices)
    matrices = from_type(matrices)
    shared_average = (end_shared - begin_shared) / float(times)
    not_shared_average = (end_no_shared - begin_no_shared) / float(times)
    print('For shared memory average is {}'.format(shared_average))
    print('For not shared memory average is {}'.format(not_shared_average))
if __name__ == '__main__':
    # Benchmark several square matrix sizes at a fixed sparsity.
    print('Testing uint32 multiplication')
    for size in (100, 1000, 100000):
        print('-> Size of matrix {}'.format((size,) * 2))
        run_uint32(size, times=50, sparsity=0.7)
{
"api_name": "numpy.random.seed",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.random",... |
29292987146 | import typer
from typing import Optional
import os
import sys
import json
from alternat.generation import Generator
from alternat.generation.exceptions import InvalidConfigFile, InvalidGeneratorDriver
from alternat.collection import Collector
import subprocess
import shutil
app = typer.Typer()
# This key determines the driver (if present in the json file)
_DRIVER_KEY = "DRIVER"
# This key determines the generator level config (if present in the json file)
_GENERATOR_KEY = "GENERATOR"
@app.command("collect")
def collect(
url: str = None,
output_dir_path: str = None,
download_recursive: bool = False,
collect_using_apify: bool = True,
):
"""Collects image from URL specified.
:param url: The URL from where the image needs to be downloaded.
:type url: str
:param output_dir_path: Path to directory where the images downloaded should be dumped
:type output_dir_path: str
:param download_recursive: Whether to recursively crawl a site, defaults to False
:type download_recursive: bool, optional
:param collect_using_apify: Whether to crawl using appify crawler, defaults to True
:type collect_using_apify: bool, optional
"""
collector = Collector()
if url is None:
typer.echo("ERROR : Specify url from which images needs to be collected using --url argument")
return None
if output_dir_path is None:
typer.echo("ERROR : Specify output directory path where the images needs to be collected using "
"--output-dir-path argument")
return None
collector.process(url, output_dir_path, download_recursive, collect_using_apify)
@app.command("generate")
def generate(
output_dir_path: str = typer.Option(...),
input_dir_path: str = None,
input_image_file_path: str = None,
base64_image: str = None,
driver_config_file_path: str = None
):
"""Analyze the image to generate alt-text
:param output_dir_path: Output dir path to store the results, defaults to typer.Option(...)
:type output_dir_path: str, optional
:param input_dir_path: Directory path to the folder containing images, defaults to None
:type input_dir_path: str, optional
:param input_image_file_path: Path to image file to be processed (used only if single image needs to be processed), defaults to None
:type input_image_file_path: str, optional
:param base64_image: Base64 image to be processed (used only if single image needs to be processed), defaults to None
:type base64_image: str, optional
:param driver_config_file_path: Path to the generator JSON config file (defaults will be used if not provided)
:return:, defaults to None
:type driver_config_file_path: str, optional
:raises InvalidGeneratorDriver: Driver invalid error.
:raises InvalidConfigFile: Configuration file is invalid.
:return: collection of JSON representing alt-text data for images
:rtype: [type]
"""
generator = Generator()
if driver_config_file_path is not None:
file_extension = driver_config_file_path.split(".")[-1]
if file_extension.lower() == "json":
#read the json file
with open(driver_config_file_path) as f:
data = json.load(f)
if _DRIVER_KEY in data.keys():
generator = Generator(data[_DRIVER_KEY])
generator.set_driver_config(data)
else:
raise InvalidGeneratorDriver(Generator.ALLOWED_DRIVERS)
# check if generator conf is present
if _GENERATOR_KEY in data.keys():
generator_conf = data[_GENERATOR_KEY]
generator.set_config(generator_conf)
else:
raise InvalidConfigFile()
results = []
if input_dir_path is None:
if input_image_file_path is None:
if base64_image is not None:
result_json = generator.generate_alt_text_from_base64(base64_image)
print(result_json)
return result_json
else:
typer.echo("One of --base64_image or --input-image-file-path is missing")
return
else:
typer.echo("Processing image : %s" % input_image_file_path)
result_json = generator.generate_alt_text_from_file(input_image_file_path, output_dir_path)
print(result_json)
return result_json
else:
for path, subdirs, files in os.walk(input_dir_path):
for filename in files:
if (
filename.endswith(".jpg")
or filename.endswith(".png")
or filename.endswith(".jpeg")
):
image_path = os.path.join(path, filename)
typer.echo("Processing image : %s" % image_path)
result_json = generator.generate_alt_text_from_file(image_path, output_dir_path)
print(result_json)
results.append(result_json)
typer.echo("Result saved at : %s" % output_dir_path)
return results
if __name__ == "__main__":
app()
| keplerlab/alternat | app.py | app.py | py | 5,217 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "typer.Typer",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "alternat.collection.Collector",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "typer.echo",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "typer.echo",
... |
17704058430 | import os
import yaml
from behave import fixture, use_fixture
from selenium import webdriver
# Target browser/platform for the test run, overridable via environment variables.
browser_type = os.environ.get("BROWSER", "chrome")
platform = os.environ.get("PLATFORM", "desktop")
def get_browser(name):
    """Instantiate a Selenium driver for the given browser name.

    Only 'chrome' is supported; any other name yields None.
    """
    if name != "chrome":
        return None
    return webdriver.Chrome()
def get_vars():
    """Load every '*_variables.yaml' file next to this module into one dict.

    :return: mapping {file prefix: parsed YAML content}, where the prefix is
        the filename part before the first underscore.
    """
    variables = {}
    dir_path = os.path.dirname(os.path.realpath(__file__))
    print(dir_path)
    for filename in os.listdir(dir_path):
        if filename.endswith("_variables.yaml"):
            # Context manager guarantees the handle is closed even if
            # yaml.load raises (the original leaked the handle in that case).
            with open(os.path.join(dir_path, filename)) as file:
                local_vars = yaml.load(file, yaml.SafeLoader)
            basename = filename.split("_")[0]
            variables[basename] = local_vars
    return variables
def get_lookup(variables):
    """Return a resolver that maps '::'-separated paths into the variables tree.

    The resolver walks nested dicts key by key; if any step is missing or a
    non-dict is reached too early, the original path string is returned
    unchanged so templates degrade gracefully.
    """
    def lookup(name):
        current = variables
        for key in name.split("::"):
            if not isinstance(current, dict):
                return name
            value = current.get(key)
            if value is None:
                return name
            current = value
        return current
    return lookup
@fixture
def selenium_browser_chrome(context):
    # -- HINT: @behave.fixture is similar to @contextlib.contextmanager
    # SETUP: launch Chrome and attach a variable-lookup helper to the context.
    context.browser = webdriver.Chrome()
    context.lookup = get_lookup(get_vars())
    yield context.browser
    # -- CLEANUP-FIXTURE PART:
    context.browser.quit()
def before_all(context):
    # Behave hook: runs once before any feature; registers the browser fixture
    # so its cleanup part executes when the run finishes.
    use_fixture(selenium_browser_chrome, context)
# -- HINT: CLEANUP-FIXTURE is performed after after_all() hook is called.
| ChaitanyaAdhav/SHORE_Capital | features/environment.py | environment.py | py | 1,566 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_num... |
22912869355 | import argparse
import conf
from dfwrapper import HeinzWrapper, ResistanceWrapper
from plotting import Plotter
def main(args):
    """Configure from CLI args, wrap the current/voltage data, derive resistance and plot it."""
    conf.configure_from_args(args)
    curr_wrapper = HeinzWrapper(conf.curr_file_names, 'curr')
    volt_wrapper = HeinzWrapper(conf.volt_file_names, 'volt')
    # Resistance is derived from the current and voltage wrappers.
    comb_wrapper = ResistanceWrapper(curr_wrapper, volt_wrapper)
    my_plotter = Plotter(comb_wrapper)
    my_plotter.plot()
if __name__ == '__main__':
    # CLI: optional date filter, log verbosity and output folder.
    parser = argparse.ArgumentParser()
    parser.add_argument("--datelist", nargs="+", default=None, help="dates to consider")
    parser.add_argument("--loglvl", type=int, default=0, help="0: warning, 1: info, 2: debug")
    parser.add_argument("--outputfolder", type=str, default="data/output/",
                        help="name of output file")
    args = parser.parse_args()
    main(args)
{
"api_name": "conf.configure_from_args",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "dfwrapper.HeinzWrapper",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "conf.curr_file_names",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name":... |
13158886481 | from flask import Flask, render_template, jsonify
import serial
import sys
import threading
import openpyxl
from openpyxl import Workbook
from openpyxl.utils import get_column_letter
import datetime
app = Flask(__name__)

# Shared state between the serial-reader thread and the SSE endpoint.
latest_tag = ''
tag_count = 0
tag_data = []
def convert_tag_from_bytes_to_hex(tag_bytes_list):
    """Format bytes 4..15 of a reader frame as an uppercase hex string.

    The leading four bytes and trailing bytes of the frame are protocol
    placeholders and are excluded from the tag value.
    """
    payload = (value for index, value in enumerate(tag_bytes_list) if 4 <= index <= 15)
    return "".join("{0:02X}".format(value) for value in payload)
def run_test():
    """Continuously read RFID tag frames from COM4, dedupe them, log each new
    tag (value + timestamp) to RFID_Data.xlsx and update the module globals
    consumed by the /stream endpoint."""
    global latest_tag
    global tag_count
    global tag_data
    tag_bytes_list_for_device_1 = []
    tag_hex_value_list = set([])
    should_read_tag_from_device_1 = False
    try:
        serial_device_1 = serial.Serial('COM4', 57600, timeout=0.5)
    except serial.serialutil.SerialException as err:
        print('There was a problem while opening the ports for the reader')
        raise err
    try:
        serial_device_1.reset_input_buffer()
        # create a new workbook and sheet
        wb = Workbook()
        sheet = wb.active
        sheet.title = "RFID Data"
        sheet['A1'] = "Tag Value"
        sheet['B1'] = "Timestamp"
        row_num = 2
        written_tag_values = set()
        while True:
            tag_hex_value = ""
            read_bytes_from_device_1 = serial_device_1.read()
            int_value_from_device_1 = int.from_bytes(read_bytes_from_device_1, "big")
            # 0x11 marks the start of an 18-byte tag frame — TODO confirm
            # against the reader's protocol documentation.
            if int_value_from_device_1 == 0x11:
                should_read_tag_from_device_1 = True
            if should_read_tag_from_device_1 is True:
                tag_bytes_list_for_device_1.append(int_value_from_device_1)
                if len(tag_bytes_list_for_device_1) == 18:
                    should_read_tag_from_device_1 = False
                    tag_hex_value = convert_tag_from_bytes_to_hex(tag_bytes_list_for_device_1)
                    tag_hex_value_list.add(tag_hex_value)
                    tag_bytes_list_for_device_1.clear()
                    # update latest_tag and tag_count
                    latest_tag = tag_hex_value
                    tag_count = len(tag_hex_value_list)
                    if tag_hex_value not in written_tag_values :
                        # write to excel sheet
                        sheet.cell(row=row_num, column=1).value = tag_hex_value
                        written_tag_values.add(tag_hex_value)
                        sheet.cell(row=row_num, column=2).value = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                        row_num += 1
                        # NOTE(review): saving the whole workbook per new tag is
                        # O(file size) each time; fine for small logs.
                        wb.save("RFID_Data.xlsx")
                    print(f"RFID Tag count: {tag_count}, Latest tag: {latest_tag}")
    except KeyboardInterrupt:
        print("Received keyboard interrupt in the RFID reader test program. Closing the ports and exiting the program")
        serial_device_1.flush()
        serial_device_1.reset_input_buffer()
        serial_device_1.close()
        sys.exit(0)
@app.route('/')
def index():
    """Serve the dashboard page."""
    return render_template('index.html')
@app.route('/stream')
def stream():
    """Server-sent events endpoint pushing the latest tag and total tag count."""
    def event_stream():
        while True:
            # '{{'/'}}' are literal braces in str.format, so each event body is a JSON object.
            yield 'data: {{"latest_tag": "{0}", "tag_count": "{1}"}}\n\n'.format(latest_tag, tag_count)
    return app.response_class(event_stream(), mimetype='text/event-stream')
if __name__ == "__main__":
run_test_thread = threading.Thread(target=run_test)
run_test_thread.start()
app.run(debug=True)
| SudeepKulkarni3301/LI2-Internship | app_excel_file.py | app_excel_file.py | py | 3,692 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "serial.Serial",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "serial.serialutil",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "openpyxl.Workbook",
... |
34113477971 | import numpy as np
import matplotlib.pyplot as plt
import pystan
np.set_printoptions(precision=5, suppress=True)
import gzip
import os
import pdb
import pickle
# Reproducibility.
seed = 1
np.random.seed(seed)
# Problem dimensions: n_N observations, n_F latent factors, n_Y observed series.
n_N = 500
n_F = 5
n_Y = 10
# Sampler settings: n_D total draws, n_W warmup draws, n_C chains, AD = adapt_delta.
n_D = 2000
n_W = 1000
n_C = 4
AD = 0.95
Y = np.zeros((n_N, n_Y))
B = np.zeros((n_Y, n_F))
log_F_sigma = np.zeros((n_F))
# y hyperparameters
log_y_sigma = np.random.normal(0, 1) * np.ones(n_Y)
# f hyperparameters
# log_f_sigma = np.random.normal(0, 0.25, n_F)
# diag chol
# chol_log_f_sigma = np.diag(np.abs(np.random.normal(0, 0.1, n_F)))
# full chol
chol_log_f_sigma = np.random.normal(0, 1, n_F * n_F).reshape(n_F, n_F)
# Force a positive diagonal so the matrix is a valid Cholesky factor.
row, col = np.diag_indices(n_F)
chol_log_f_sigma[row, col] = np.abs(chol_log_f_sigma[row, col])
# B
base_order = 1
# base_order = 0.05
# bias_order = 0.5
# p_connect = 0.3
# n_connect = np.int(0.3 * n_Y * n_F)
# add = (2 * np.random.binomial(1, 0.5, n_connect) - 1) * (bias_order + np.abs(np.random.standard_normal(n_connect)))
B_ = base_order * np.random.standard_normal(n_Y * n_F)
# B_[:n_connect] += add
B_ = np.random.permutation(B_).reshape(n_Y, n_F)
# Lower-triangular loading matrix with unit diagonal (identifiability constraint).
row, col = np.triu_indices(n_Y, 0, n_F)
B_[row, col] = 0
np.fill_diagonal(B_, 1)
# Initialise
# log_F_sigma[0] = np.random.multivariate_normal(log_f_sigma_, chol_log_f_sigma ** 2)
log_F_sigma = np.zeros(n_F) # chol_log_f_sigma @ np.random.standard_normal(n_F)
B = B_ # + base_order * np.tril(np.random.standard_normal(n_Y * n_F).reshape(n_Y, n_F), k=-1)
# Simulate observations. NOTE(review): the loop starts at i=1, so Y[0] stays
# all-zero — confirm this is intentional.
for i in range(1, n_N):
    Y[i] = B @ np.random.multivariate_normal(np.zeros(n_F), np.diag(np.exp(2 * log_F_sigma))) + np.exp(log_y_sigma) * np.random.standard_normal(n_Y)
# Data dict passed to the Stan model in infer.stan.
dat = {
    'P': n_Y,
    'F': n_F,
    'N': n_N,
    # 'fac_mu': np.zeros(n_F),
    'y': Y
}
model = pystan.StanModel(file='infer.stan')
fit = model.sampling(data=dat, iter=n_D, warmup=n_W, seed=seed, chains=n_C, control={'adapt_delta':AD})
# with gzip.open('pystan_non_tv_fit_{}_{}_{}_{}_{}.gz'.format(n_D, n_W, seed, n_C, AD), 'wb') as f:
#     pickle.dump({'model' : model, 'fit' : fit}, f)
res = fit.extract(pars=['beta', 'fac'])
# Compare true loadings against the posterior-mean estimate.
plt.scatter(B, np.mean(res['beta'], axis=0))
plt.show()
# Debug breakpoint left in deliberately for interactive inspection.
pdb.set_trace()
print(B - np.mean(res['beta'], axis=0))
print('log_y_sigma', log_y_sigma)
print('log_F_sigma', log_F_sigma)
print(fit.stansummary(pars=['log_y_sd', 'L_lower']))
# Posterior-mean reconstruction of Y and its residual.
Y_hat = np.einsum('ijk,ilk->ilj', res['beta'], res['fac'])
print(np.mean(Y - Y_hat, axis=(0, 1)))
| atechnicolorskye/post_sum | infer/infer.py | infer.py | py | 2,408 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.set_printoptions",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros... |
17444702079 | from mimetypes import init
from turtle import forward
from torch import conv2d
import torch
import torch.nn as nn
import common
#from model import common
import torch.nn.functional as F
import math
import cv2
import os
import datetime
import scipy.io as io
import numpy as np
def EzConv(in_channel, out_channel, kernel_size):
    """'Same'-size convolution: stride 1, padding = kernel_size // 2, with bias."""
    padding = kernel_size // 2
    return nn.Conv2d(in_channel, out_channel, kernel_size,
                     stride=1, padding=padding, bias=True)
class Upsample(nn.Sequential):
    """Sub-pixel upsampler: conv to (r^2 * n_feats) channels + PixelShuffle(r).

    Supports power-of-two scales (stacked x2 stages) and scale 3; any other
    scale raises NotImplementedError.
    """

    def __init__(self, scale, n_feats, bn=False, act=False, bias=True, conv=EzConv):
        layers = []
        if (scale & (scale - 1)) == 0:  # power of two: log2(scale) x2 stages
            for _ in range(int(math.log(scale, 2))):
                layers += [conv(n_feats, 4 * n_feats, 3), nn.PixelShuffle(2)]
                if bn:
                    layers.append(nn.BatchNorm2d(n_feats))
                if act == 'relu':
                    layers.append(nn.ReLU(True))
                elif act == 'prelu':
                    layers.append(nn.PReLU(n_feats))
        elif scale == 3:
            layers += [conv(n_feats, 9 * n_feats, 3), nn.PixelShuffle(3)]
            if bn:
                layers.append(nn.BatchNorm2d(n_feats))
            if act == 'relu':
                layers.append(nn.ReLU(True))
            elif act == 'prelu':
                layers.append(nn.PReLU(n_feats))
        else:
            raise NotImplementedError
        super(Upsample, self).__init__(*layers)
class CA(nn.Module):
    """Residual block with channel attention (squeeze-and-excitation style)."""

    def __init__(self, n_feats, kernel_size=3, bias=True, bn=False, act=nn.ReLU(True), res_scale=1, conv=EzConv, reduction=16):
        super(CA, self).__init__()
        body_layers = []
        for i in range(2):
            body_layers.append(conv(n_feats, n_feats, kernel_size))
            if bn:
                body_layers.append(nn.BatchNorm2d(n_feats))
            if i == 0:
                body_layers.append(act)
        self.body = nn.Sequential(*body_layers)
        # squeeze (global pool) -> bottleneck -> expand -> sigmoid channel weights
        self.conv_du = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(n_feats, n_feats // reduction, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(n_feats // reduction, n_feats, 1, padding=0, bias=True),
            nn.Sigmoid()
        )

    def forward(self, x):
        features = self.body(x)
        weights = self.conv_du(features)
        attended = torch.mul(features, weights)
        return attended + x
class SCconv(nn.Module):
    """Self-calibrated convolution: the first half of the channels is modulated
    by a pooled-and-upsampled attention map, the second half gets a plain conv,
    and the two halves are re-concatenated."""

    def __init__(self, n_feats, kernel_size, pooling_r):
        super(SCconv, self).__init__()
        self.half_feats = n_feats // 2
        half = n_feats // 2
        pad = kernel_size // 2
        self.f1 = nn.Sequential(
            nn.Conv2d(half, half, kernel_size, padding=pad),
            nn.ReLU(True)
        )
        self.f2 = nn.Sequential(
            nn.AvgPool2d(kernel_size=pooling_r, stride=pooling_r),
            nn.Conv2d(half, half, kernel_size, padding=pad),
        )
        self.f3 = nn.Conv2d(half, half, kernel_size, padding=pad)
        self.f4 = nn.Sequential(
            nn.Conv2d(half, half, kernel_size, padding=pad),
            nn.ReLU(True)
        )

    def forward(self, x):
        first = x[:, 0:self.half_feats, :, :]
        second = x[:, self.half_feats:, :, :]
        # gate = sigmoid(identity + upsampled(pooled conv)) on the first half
        gate = torch.sigmoid(torch.add(first, F.interpolate(self.f2(first), first.size()[2:])))
        calibrated = self.f4(torch.mul(self.f3(first), gate))
        plain = self.f1(second)
        return torch.cat([calibrated, plain], dim=1)
class SSELB(nn.Module):
    """Residual block: self-calibrated convolution (SCconv) followed by channel attention (CA)."""

    def __init__(self, n_feats, kernel_size, pooling_r):
        super(SSELB, self).__init__()
        self.body = nn.Sequential(
            SCconv(n_feats, kernel_size, pooling_r),
            CA(n_feats),
        )

    def forward(self, x):
        # residual connection around the SCconv + CA body
        res = self.body(x)
        return res + x
class NGIM(nn.Module):
    """Neighbor-group information upsampling module.

    Upsamples the current group's features (xi) with a transposed conv and
    corrects them with an error branch computed from the previous (xl),
    current (xi) and next (xn) groups' features.
    """

    # (kernel_size, padding) of the transposed conv per supported scale;
    # stride equals the scale in both cases.
    _UP_PARAMS = {4: (8, 2), 8: (12, 2)}

    def __init__(self, n_feats, scale):
        super(NGIM, self).__init__()
        if scale not in self._UP_PARAMS:
            # The original silently skipped building the up-sampling layers for
            # unknown scales and crashed later in forward(); fail fast instead.
            raise ValueError('NGIM only supports scale 4 or 8, got {}'.format(scale))
        kernel_size, padding = self._UP_PARAMS[scale]
        self.TrunkUp = nn.Sequential(
            nn.ConvTranspose2d(n_feats, n_feats, kernel_size=kernel_size, stride=scale, padding=padding),
            nn.PReLU(n_feats)
        )
        self.MultiUp = nn.Sequential(
            nn.Conv2d(n_feats * 3, n_feats // 2, kernel_size=3, padding=1),
            nn.Conv2d(n_feats // 2, n_feats, kernel_size=3, padding=1),
            nn.ConvTranspose2d(n_feats, n_feats, kernel_size=kernel_size, stride=scale, padding=padding),
            nn.PReLU(n_feats)
        )
        self.error_resblock = nn.Sequential(
            nn.Conv2d(n_feats, n_feats, kernel_size=3, padding=1),
        )

    def forward(self, xl, xi, xn):
        """xl/xi/xn: previous/current/next group features, each (B, n_feats, H, W).

        Returns upsampled current-group features (B, n_feats, H*scale, W*scale).
        """
        h1 = self.TrunkUp(xi)
        h2 = self.MultiUp(torch.cat([xl, xi, xn], dim=1))
        # error-correction branch refines the trunk upsampling
        e = self.error_resblock(h2 - h1)
        return h1 + e
class SSELM(nn.Module):
    """Per-group feature extractor: a head conv followed by n_blocks SSELBs
    under one long residual connection."""

    def __init__(self, n_colors, n_feats, n_blocks, pooling_r):
        super(SSELM, self).__init__()
        kernel_size = 3
        self.head = nn.Conv2d(n_colors, n_feats, kernel_size, padding=kernel_size // 2)
        body = []
        for i in range(n_blocks):
            body.append(SSELB(n_feats, kernel_size, pooling_r))
        self.body = nn.Sequential(*body)
        #self.recon = nn.Conv2d(n_feats, n_colors, kernel_size=3,padding=kernel_size//2)

    def forward(self, x):
        x = self.head(x)
        # long skip connection over the SSELB stack
        y = self.body(x) + x
        return y
class GELIN(nn.Module):
    """Group-wise hyperspectral SR network: splits the spectral bands into
    overlapping groups, extracts features per group (SSELM), upsamples each
    group with neighbor-group information (NGIM), and averages overlapping
    bands before adding the lms global residual."""

    def __init__(self, n_feats, n_colors, kernel_size, pooling_r, n_subs, n_ovls, blocks, scale):
        super(GELIN, self).__init__()
        # calculate the group number (the number of branch networks)
        # ceil division: G groups of n_subs bands overlapping by n_ovls
        self.n_feats = n_feats
        self.n_subs = n_subs
        self.G = math.ceil((n_colors - n_ovls) / (n_subs - n_ovls))
        # calculate group indices
        self.start_idx = []
        self.end_idx = []
        self.scale = scale
        for g in range(self.G):
            sta_ind = (n_subs - n_ovls) * g
            end_ind = sta_ind + n_subs
            # clamp the last group so every group has exactly n_subs bands
            if end_ind > n_colors:
                end_ind = n_colors
                sta_ind = n_colors - n_subs
            # store each group's [start, end) band indices
            self.start_idx.append(sta_ind)
            self.end_idx.append(end_ind)
        # one shared branch is reused for every group (weight sharing)
        self.branch = SSELM(n_subs, n_feats, blocks, pooling_r)
        self.branch_up = NGIM(n_feats, scale)
        self.branch_recon = nn.Conv2d(n_feats, n_subs, kernel_size=3, padding=kernel_size // 2)

    def forward(self, x, lms):
        # x: low-res hyperspectral input (B, C, H, W); lms: upsampled reference
        # added as a global residual at the end.
        b, c, h, w = x.shape
        m = []
        # NOTE(review): output buffers are created on CUDA unconditionally, so a
        # GPU is required — confirm this is intended.
        y = torch.zeros(b, c, h * self.scale, w * self.scale).cuda()
        channel_counter = torch.zeros(c).cuda()
        # Pass 1: per-group feature extraction.
        for g in range(self.G):
            sta_ind = self.start_idx[g]
            end_ind = self.end_idx[g]
            xi = x[:, sta_ind:end_ind, :, :]
            xi = self.branch(xi)
            m.append(xi)
        # Pass 2: upsample each group using its cyclic previous/next neighbors.
        for g in range(self.G):
            sta_ind = self.start_idx[g]
            end_ind = self.end_idx[g]
            if g == 0:
                xl = m[self.G - 1]
                xi = m[g]
                xn = m[g + 1]
            elif g == self.G - 1:
                xl = m[g - 1]
                xi = m[g]
                xn = m[0]
            else:
                xl = m[g - 1]
                xi = m[g]
                xn = m[g + 1]
            xi = self.branch_up(xl, xi, xn)
            xi = self.branch_recon(xi)
            y[:, sta_ind:end_ind, :, :] += xi
            # record how many groups wrote each band so overlaps can be averaged
            channel_counter[sta_ind:end_ind] = channel_counter[sta_ind:end_ind] + 1
        # intermediate result is averaged according to band overlap counts
        y = y / channel_counter.unsqueeze(1).unsqueeze(2)
        y = y + lms
        return y
| HuQ1an/GELIN_TGRS | Ours.py | Ours.py | py | 8,839 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.nn.Conv2d",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"lin... |
4944678267 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# encoding: utf-8
#客户端调用,用于查看API返回结果
import json
import os
import hashlib
import json
import platform
def getSysType():
    """Return a short OS tag for the current platform.

    :return: 'win' on Windows, 'mac' on Darwin/macOS, 'linux' on Linux,
        None for any other system.
    """
    # The original if/elif chain carried a wrong comment ("mac" on the Windows
    # branch) and fell through to an implicit None; a mapping is clearer and
    # keeps the same None fallback.
    return {'Windows': 'win', 'Darwin': 'mac', 'Linux': 'linux'}.get(platform.system())
def isSignOK(msgdic, secretkey):
    """Verify a message dict's SHA-256 signature.

    The signed payload is json.dumps(data) + str(time) + secretkey, uppercased
    hex digest. The literal sign value 'test' is always accepted (existing
    debug shortcut, kept as-is).
    """
    payload = json.dumps(msgdic['data']) + str(msgdic['time']) + secretkey
    claimed = msgdic['sign']
    if claimed == 'test':
        return True
    expected = hashlib.sha256(payload.encode("utf8")).hexdigest().upper()
    return claimed == expected
def signMsg(msgdic, ptime, secretkey):
    """Compute the uppercase SHA-256 hex signature for a payload.

    Dicts (and other non-str payloads) are serialized with json.dumps;
    plain strings are signed as-is.
    """
    if type(msgdic) == str:
        serialized = msgdic
    else:
        serialized = json.dumps(msgdic)
    digest = hashlib.sha256((serialized + str(ptime) + secretkey).encode("utf8"))
    return digest.hexdigest().upper()
{
"api_name": "platform.system",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "hashlib.sha256",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "hashlib.sha256",
"line_... |
27541033350 | import re
from Bio import AlignIO, SeqIO
from Bio.Seq import Seq
import argparse
# This section defines the input options. You can copy and modify this section for future scripts, it's very handy! In that case don't forget to import the argparse package (import argparse)
# Get arguments
def add_arguments(parser):
    """Register the command-line options for the alignment converter on *parser*."""
    in_formats = ["fasta", "nexus", "stockholm", "phylip", "clustal", "emboss",
                  "phylip-sequential", "phylip-relaxed", "fasta-m10", "ig", "maf"]
    out_formats = ["fasta", "nexus", "stockholm", "phylip", "clustal",
                   "phylip-sequential", "phylip-relaxed", "maf"]
    parser.add_argument('--input', required=True, default=None,
                        help='Path to your input alignment file.')
    parser.add_argument('--input_format', choices=in_formats, required=True,
                        help='Alignment format of input file.')
    parser.add_argument('--output', required=True,
                        help='Name of output alignment.')
    parser.add_argument('--output_format', choices=out_formats, required=True,
                        help='Desired alignment format of output file.')
    parser.add_argument('--fix_invalid_characters', action='store_true', default=False,
                        help='Replace all invalid bases (not A,C,T,G,a,c,t,g,-) with "N"')
# Parse the command-line options declared in add_arguments().
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
# !!!!!!!!!!!!NOTE:!!!!!!!!!!!!!! You can call the input variables in the following manner: args.name_of_variable,
# e.g. input_file = args.input, input_format = args.input_format, etc.
#_________________________________________________
# Insert a function here, which replaces all invalid characters with N's
def replace_bad_chars(alignment):
    """Replace every base that is not A,C,T,G,a,c,t,g or '-' with 'N', in place.

    Mutates each record of the given Bio.Align alignment object; relies on the
    module-level `re` and `Seq` imports already present at the top of the file.
    (The original body was a placeholder string and did nothing.)
    """
    for record in alignment:
        cleaned = re.sub(r'[^ACGTacgt-]', 'N', str(record.seq))
        record.seq = Seq(cleaned)
#_________________________________________________
# 1. Read the alignment from the input file in its declared format.
# (The original called AlignIO.read() with no arguments, which raises.)
alignment = AlignIO.read(args.input, args.input_format)
# 2. Optionally replace invalid characters with N's.
if args.fix_invalid_characters:
    replace_bad_chars(alignment)
# 3. Write the alignment in the requested format. AlignIO.write handles whole
# alignments (the original called SeqIO.write() with no arguments).
AlignIO.write(alignment, args.output, args.output_format)
print('\n\nNew alignment written to file %s\n\n' % args.output)
{
"api_name": "argparse.ArgumentParser",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "Bio.AlignIO.read",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "Bio.AlignIO",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "Bio.SeqIO.write"... |
34412858770 | # -- coding: utf-8 --`
import os
import argparse
import json
import botocore
import boto3
def main(args):
    """Invoke the target Lambda up to n times, downloading any produced image from S3.

    :param args: dict of CLI options; None values are stripped so the Lambda
        applies its own defaults. Iteration stops at args['limit'] or at the
        first non-200 response.
    """
    cfg = botocore.config.Config(retries={'max_attempts': 0}, read_timeout=900, connect_timeout=900)
    # Drop unset options so the Lambda-side defaults take effect.
    args = {k: v for k, v in args.items() if v is not None}
    try:
        lambda_client = boto3.client('lambda', config=cfg)
        s3_resource = boto3.resource('s3')
        results = {}
        for num in range(args['n']):
            if num == args['limit']:
                break
            # Synchronous invocation: the response payload carries the result JSON.
            response = lambda_client.invoke(
                FunctionName=args['lambda'],
                InvocationType='RequestResponse',
                Payload=json.dumps(args)
            )
            json_dict = json.loads(response['Payload'].read().decode('utf-8'))
            if json_dict['statusCode'] == 200:
                # When the Lambda stored an image in S3, fetch it locally.
                if 'bucket' in json_dict['body']:
                    s3_resource.Bucket(
                        json_dict['body']['bucket']).download_file(json_dict['body']['output'],
                        os.path.join(args['save'], json_dict['body']['output'])
                    )
            else:
                break
            results[num] = json_dict
        print(json.dumps(results, indent=2))
    except Exception as e:
        # NOTE(review): broad catch-and-print hides failures from the exit code.
        print(e)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Lambda Functionn
parser.add_argument("--lambda", type=str, required=True, help="Lambda Function Name")
# randomizer params
parser.add_argument("--seed", type=int, default=None, help="random seed for generating consistent images per prompt")
# scheduler params
parser.add_argument("--beta_start", type=float, default=None, help="LMSDiscreteScheduler::beta_start")
parser.add_argument("--beta_end", type=float, default=None, help="LMSDiscreteScheduler::beta_end")
parser.add_argument("--beta_schedule", type=str, default=None, help="LMSDiscreteScheduler::beta_schedule")
# diffusion params
parser.add_argument("--num_inference_steps", type=int, default=None, help="num inference steps")
parser.add_argument("--guidance_scale", type=float, default=None, help="guidance scale")
parser.add_argument("--eta", type=float, default=None, help="eta")
# prompt
parser.add_argument("--prompt", type=str, default="Street-art painting of Tower in style of Banksy, photorealism", help="prompt")
# img2img params
parser.add_argument("--init_image", type=str, default=None, help="path to initial image")
parser.add_argument("--strength", type=float, default=None, help="how strong the initial image should be noised [0.0, 1.0]")
# inpainting
parser.add_argument("--mask", type=str, default=None, help="mask of the region to inpaint on the initial image")
# output name
parser.add_argument("--output", type=str, default=None, help="output image prefix")
# loop
parser.add_argument("--n", type=int, default=1, help="Loop Count")
# loop Limit
parser.add_argument("--limit", type=int, default=100, help="Max Loop Count")
# dist dir
parser.add_argument("--save", type=str, default="./", help="save dir")
main(vars(parser.parse_args()))
| densenkouji/stable_diffusion.openvino.lambda | demo.py | demo.py | py | 3,184 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "botocore.config.Config",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "botocore.config",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "boto3.client",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "boto3.resource... |
17895317270 | from typing import Any, Mapping, Optional, Union, Tuple
from flax.core import scope as flax_scope
import jax
import jax.numpy as jnp
import t5x.models as t5x_models
from models import gp_models # local file import from baselines.t5
from models import models as ub_models # local file import from baselines.t5
Array = t5x_models.Array
class EncoderDecoderBEClassifierModel(ub_models.EncoderDecoderClassifierModel):
  """A wrapper of EncoderDecoderClassifierModel to support BatchEnsemble loss."""

  def loss_fn(
      self,
      params,
      batch,
      dropout_rng,
  ):
    # Tile targets/weights along the leading axis so each of the ens_size
    # ensemble members sees the same labels.
    # NOTE(review): this mutates the caller's batch dict in place — confirm
    # callers do not reuse the batch afterwards.
    target_tokens = batch['decoder_target_tokens']
    # Tile the labels for batch ensembles.
    ens_size = self.module.ens_size
    batch['decoder_target_tokens'] = jnp.tile(target_tokens, [ens_size] + [1] *
                                              (target_tokens.ndim - 1))
    loss_weights = batch['decoder_loss_weights']
    if loss_weights is not None:
      batch['decoder_loss_weights'] = jnp.tile(loss_weights, [ens_size] + [1] *
                                               (loss_weights.ndim - 1))
    return super().loss_fn(params, batch, dropout_rng)

  def _compute_argmax_score(
      self,
      params: t5x_models.PyTreeDef,
      batch: Mapping[str, jnp.ndarray],
      return_intermediates: bool = False,
      dropout_rng: Optional[jnp.ndarray] = None,
      ensemble_probs: bool = True,
  ) -> Union[jnp.ndarray, Tuple[jnp.ndarray, Mapping[str, Any]]]:
    """Compute class logits on a batch, combining ensemble members' scores."""
    ens_size = self.module.ens_size
    sequence_scores = super()._compute_argmax_score(params, batch,
                                                    return_intermediates,
                                                    dropout_rng, ensemble_probs)
    if return_intermediates:
      sequence_scores, intermediates = sequence_scores
    if ens_size > 1:
      # Split the tiled batch axis back into (ens_size, batch).
      sequence_scores = jnp.reshape(sequence_scores,
                                    (ens_size, -1) + sequence_scores.shape[1:])  # pytype: disable=attribute-error  # jax-ndarray
      if ensemble_probs:
        # Computes log(mean(exp(logits))) along the first dimension.
        sequence_scores = (
            jax.nn.logsumexp(sequence_scores, axis=0) - jnp.log(ens_size))
      else:
        sequence_scores = jnp.mean(sequence_scores, axis=0)
    if return_intermediates:
      return sequence_scores, intermediates
    return sequence_scores

  def _compute_logits_from_slice(self,
                                 decoding_state,
                                 params,
                                 encoded_inputs,
                                 raw_inputs,
                                 max_decode_length,
                                 rngs=None,
                                 ensemble_probs=True):
    """Token slice to logits from decoder model, ensemble-combined per step."""
    ens_size = self.module.ens_size
    # k: leading dimension of the flattened params' first leaf — presumably
    # the number of decode candidates/batch replicas; TODO confirm.
    k = jax.tree_util.tree_flatten(params)[0][0].shape[0]
    flat_logits, new_cache = super()._compute_logits_from_slice(
        decoding_state, params, encoded_inputs, raw_inputs, max_decode_length,
        rngs, ensemble_probs)
    if ens_size > 1:
      flat_logits = jnp.reshape(flat_logits,
                                (k, ens_size, -1) + flat_logits.shape[1:])
      if ensemble_probs:
        # log-mean-exp over the ensemble axis
        flat_logits = (
            jax.nn.logsumexp(flat_logits, axis=1) - jnp.log(ens_size))
      else:
        flat_logits = jnp.mean(flat_logits, axis=1)
      flat_logits = jnp.reshape(flat_logits, (-1,) + flat_logits.shape[2:])
    return flat_logits, new_cache
class EncoderDecoderBEGpClassifierModel(EncoderDecoderBEClassifierModel,
                                        gp_models.EncoderDecoderGPModel):
  """A wrapper of EncoderDecoderClassifierModel for BatchEnsemble and GP."""

  def loss_fn(
      self,
      params,
      batch,
      dropout_rng,
  ):
    """Tiles targets/weights for the ensemble, then applies the GP loss."""
    num_members = self.module.ens_size
    targets = batch['decoder_target_tokens']
    # Replicate along the leading axis to match the ensemble-expanded batch.
    batch['decoder_target_tokens'] = jnp.tile(
        targets, (num_members,) + (1,) * (targets.ndim - 1))
    weights = batch['decoder_loss_weights']
    if weights is not None:
      batch['decoder_loss_weights'] = jnp.tile(
          weights, (num_members,) + (1,) * (weights.ndim - 1))
    # Explicitly dispatch to the GP loss, bypassing the BE parent's loss_fn.
    return gp_models.EncoderDecoderGPModel.loss_fn(self, params, batch,
                                                   dropout_rng)

  def get_initial_variables(
      self,
      rng: jnp.ndarray,
      input_shapes: Mapping[str, Array],
      input_types: Optional[Mapping[str, jnp.dtype]] = None
  ) -> flax_scope.FrozenVariableDict:
    """Delegates variable initialization to the GP parent class."""
    return gp_models.EncoderDecoderGPModel.get_initial_variables(
        self, rng=rng, input_shapes=input_shapes, input_types=input_types)
class EncoderDecoderBEBeamScoreModel(ub_models.EncoderDecoderBeamScoreModel,  # pytype: disable=signature-mismatch  # overriding-parameter-count-checks
                                     EncoderDecoderBEClassifierModel):
  """A wrapper of EncoderDecoderClassifierModel to support BatchEnsemble loss."""

  def loss_fn(self, params, batch, dropout_rng):
    """Routes the loss through the BatchEnsemble implementation."""
    return EncoderDecoderBEClassifierModel.loss_fn(
        self, params, batch, dropout_rng)

  def _compute_argmax_score(
      self,
      params: t5x_models.PyTreeDef,
      batch: Mapping[str, jnp.ndarray],
      return_intermediates: bool = False,
      dropout_rng: Optional[jnp.ndarray] = None,
      ensemble_probs: bool = True,
  ) -> Union[jnp.ndarray, Tuple[jnp.ndarray, Mapping[str, Any]]]:
    """Compute class logits on a batch."""
    return EncoderDecoderBEClassifierModel._compute_argmax_score(
        self, params, batch,
        return_intermediates=return_intermediates,
        dropout_rng=dropout_rng,
        ensemble_probs=ensemble_probs)

  def _compute_logits_from_slice(self, decoding_state, params, encoded_inputs,
                                 raw_inputs, max_decode_length, rngs=None,
                                 ensemble_probs=True):
    """Token slice to logits from decoder model."""
    return EncoderDecoderBEClassifierModel._compute_logits_from_slice(
        self, decoding_state, params, encoded_inputs, raw_inputs,
        max_decode_length, rngs=rngs, ensemble_probs=ensemble_probs)
class EncoderDecoderBEGpBeamScoreModel(EncoderDecoderBEBeamScoreModel,  # pytype: disable=signature-mismatch  # overriding-parameter-count-checks
                                       EncoderDecoderBEGpClassifierModel):
  """A wrapper of EncoderDecoderBeamScoreModel for BatchEnsemble and GP."""

  def loss_fn(self, params, batch, dropout_rng):
    """Routes the loss through the combined BatchEnsemble + GP implementation."""
    return EncoderDecoderBEGpClassifierModel.loss_fn(
        self, params, batch, dropout_rng)

  def get_initial_variables(
      self,
      rng: jnp.ndarray,
      input_shapes: Mapping[str, Array],
      input_types: Optional[Mapping[str, jnp.dtype]] = None
  ) -> flax_scope.FrozenVariableDict:
    """Delegates variable initialization to the BE + GP classifier class."""
    return EncoderDecoderBEGpClassifierModel.get_initial_variables(
        self, rng=rng, input_shapes=input_shapes, input_types=input_types)
| google/uncertainty-baselines | baselines/t5/models/be_models.py | be_models.py | py | 7,534 | python | en | code | 1,305 | github-code | 36 | [
{
"api_name": "t5x.models.Array",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "t5x.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "models.models.EncoderDecoderClassifierModel",
"line_number": 12,
"usage_type": "attribute"
},
{
"a... |
28438842371 | from aiohttp import web
import sys
import os
import argparse
import simplejson as json
import functools
import sqlite3
import threading
import requests
import datetime
import dateutil.parser
import simplejson as json
from collections import defaultdict
from routes import routes
from calculation.task import Task
from calculation.kit import Kit
from calculation.backtesting import Backtesting
from bucket import Bucket
from ticker import Ticker
from chart import Chart
from constants import *
def check_results(app):
    """Polls the engine for finished jobs and syncs their statuses to SQLite.

    For each result entry the matching persistence helper (Task, Kit or
    Backtesting, chosen by the entry's ``type``) updates the corresponding
    table row; if anything changed, the DB file is written back to the
    bucket. Any failure is logged and swallowed so the periodic ticker
    keeps running.

    Args:
        app: the aiohttp application carrying ``bucket``, ``engine_endpoint``
            and ``db_lock`` attributes.
    """
    # Map the engine-reported job type to the helper class that persists it.
    handlers = {
        'model_calculation': Task,
        'generate_inputs': Kit,
        'backtesting': Backtesting,
    }
    try:
        bucket = app.bucket
        req = requests.post(app.engine_endpoint + "/check_results")
        result = json.loads(req.text)
        db = sqlite3.connect(DB_PATH)
        save_db = False
        for key, value in result.items():
            startedAt = None
            finishedAt = None
            if value['startedAt'] is not None:
                startedAt = dateutil.parser.isoparse(value['startedAt'])
            if value['finishedAt'] is not None:
                finishedAt = dateutil.parser.isoparse(value['finishedAt'])
            record = {
                "id": key,
                "startedAt": startedAt,
                "finishedAt": finishedAt,
                "status": value['status'],
            }
            handler_cls = handlers.get(value['type'])
            if handler_cls is not None:
                handler_cls(db, app.db_lock).update_status(record)
                save_db = True
        if save_db:
            # Persist the updated SQLite file back to object storage.
            bucket.write(DB_PATH, DB_PATH)
    except Exception as e:
        # Best-effort: the ticker must survive transient engine/DB failures.
        print('[tick]: ', e)
def initialize():
    """Builds and configures the aiohttp application.

    Registers routes, resolves the engine endpoint (``--ee`` flag or the
    ``ENGINE_ENDPOINT`` default), pulls the SQLite database from the bucket,
    creates the schema if missing and starts the background result ticker.

    Returns:
        The configured web.Application, or None if setup failed.
    """
    try:
        app = web.Application()
        parser = argparse.ArgumentParser()
        parser.add_argument('--ee', help="engine endpoint")
        args = parser.parse_args()
        for method, path, handler in routes:
            app.router.add_route(method, path, handler)
        app.engine_endpoint = ENGINE_ENDPOINT if args.ee is None else args.ee
        print("ENDPOINT: ", app.engine_endpoint)
        # Pull the persisted SQLite database file from object storage.
        bucket = Bucket()
        bucket.read(DB_PATH, DB_PATH)
        app.bucket = bucket
        app.db = sqlite3.connect(DB_PATH)
        cursor = app.db.cursor()
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS task("
            "id INTEGER PRIMARY KEY, "
            "uuid TEXT, kit TEXT, product TEXT, kitName TEXT, createdAt TIMESTAMP, startedAt TIMESTAMP, finishedAt TIMESTAMP, status TEXT)")
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS kit("
            "id INTEGER PRIMARY KEY, "
            "uuid TEXT, name TEXT, createdAt TIMESTAMP, startedAt TIMESTAMP, finishedAt TIMESTAMP, status TEXT)"
        )
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS backtesting("
            "id INTEGER PRIMARY KEY, "
            "createdAt TIMESTAMP, startedAt TIMESTAMP, finishedAt TIMESTAMP, status TEXT)"
        )
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS backtesting_task("
            "id INTEGER PRIMARY KEY,"
            "backtestingId INTEGER, taskId INTEGER)"
        )
        app.db.commit()
        app.db_lock = threading.Lock()
        # Poll the engine for finished tasks every 5 seconds.
        app.task_update = Ticker(app, 5.0, check_results)
        app.task_update.start()
        return app
    except Exception as e:
        # NOTE(review): swallowing the error returns None, so web.run_app(app)
        # later fails with a less helpful message; consider re-raising.
        print('[ws]: could not initialize', str(e))
if __name__ == "__main__":
    # Build the app and serve it; run_app() blocks until shutdown.
    app = initialize()
    web.run_app(app)
    # Stop the background result-polling ticker once the server loop exits.
    app.task_update.stop()
| kuris996/ws | main.py | main.py | py | 3,882 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "simplejson.loads",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "dateutil.parser.parser... |
42164236500 | #!/usr/bin/env python
"""
Provide image saving service. The callback is function wait_for_message. The message is transferred to cv and then saved.
imageSave:
request has two attributes, path and number (sequence number)
response has a single bool type of whether image saving success.
When passing request to the service proxy, one can just pass in each request variable of request. The return will be imageSaveResponse. The result is in imageSaveResponse.success !
Saving numpy array costs to many spaces. Somehow the stored vale doesnot seem to be
correct. Saving the array as 16 bit png with properly chosen range might be a good option. Irrelavent depth point around the table are ruled out. Mind that the depth image array from cvbridge is not writable.
#cv2_img = self.bridge.imgmsg_to_cv2(image_msg, "passthrough")
#n_channels = 3
#dtype = np.dtype('uint8')
#dtype.newbyteorder('>' if image_msg.is_bigendian else '<')
#img = np.ndarray(
# shape=(image_msg.height, image_msg.width, n_channels),
# dtype=dtype,
# buffer=image_msg.data)
If no baseline or distortion coefficient is set, the image does not need to be rectified or undistorted.
"""
import rospy
import numpy as np
from sensor_msgs.msg import Image, CompressedImage, CameraInfo
from poke_gazebo.srv import imageSave
from cv_bridge import CvBridge, CvBridgeError
import cv2
class poke_image_saver(object):
    """ROS node class exposing the /model_image_saver/image_save service.

    On each request it grabs one RGB (compressed) and one depth frame from
    the gripper camera, writes the RGB as JPEG and the depth as a
    range-compressed 16-bit PNG under the requested path/sequence number.
    """

    def __init__(self):
        self.bridge = CvBridge()
        # Advertise the saving service; imageSave carries (path, number)
        # in the request and a bool `success` in the response.
        self.s = rospy.Service('/model_image_saver/image_save',
                               imageSave,
                               self.handle_image_save)
        print("image saving service ready...")
        """
        cam_info = rospy.wait_for_message('/my_gripper/camera1/rgb/camera_info',
                                          CameraInfo)
        self.K = np.array(cam_info.K)
        self.D = np.array(cam_info.D)
        """

    def handle_image_save(self, req):
        """Service callback: capture and save one RGB + depth frame pair.

        Returns a dict matching the imageSave response ({'success': bool}).
        """
        try:
            # 0.1 s timeout per topic; any miss raises rospy.ROSException.
            image_msg = rospy.wait_for_message(
                '/my_gripper/camera1/rgb/image_raw/compressed',
                CompressedImage,
                0.1)
            depth_msg = rospy.wait_for_message(
                '/my_gripper/camera1/depth/image_raw',
                Image,
                0.1)
            image_msg_2 = rospy.wait_for_message(
                '/my_gripper/camera1/rgb/image_raw/compressed',
                CompressedImage,
                0.1)
            cv2_img = self.bridge.compressed_imgmsg_to_cv2(image_msg_2, "bgr8")
            cv2_depth = self.bridge.imgmsg_to_cv2(depth_msg, "passthrough")
        except (rospy.ROSException, CvBridgeError) as e:
            print(e)
            success = False
        # if no exceptions, else clause is executed.
        else:
            #img_rectified = cv2.undistort(cv2_img, self.K.reshape(3,3), self.D)
            cv2.imwrite(req.path+'img'+'%04d'%req.number+'.jpg', cv2_img)
            # bigger objects and surrounded table: 0.315 - 0.532
            # Clamp far readings, then map depth [0.315, 0.532] m to the
            # full uint16 range so the PNG keeps usable precision.
            depth_temp = cv2_depth.copy()
            depth_temp[depth_temp>0.9] = 0.532
            depth_temp = (depth_temp - 0.315)/(0.532-0.315)*65535
            depth_temp_int = np.array(depth_temp, np.uint16)
            cv2.imwrite(
                req.path+'depth'+'%04d'%req.number+'.png', depth_temp_int)
            success = True
        # finally clause is executed regardless of exceptions.
        # NOTE(review): returning from `finally` also swallows any exception
        # not caught above — apparently intentional, so the service always
        # answers; confirm before changing.
        finally:
            return {'success':success}
if __name__ == '__main__':
    rospy.init_node('model_image_saver')
    image_saver = poke_image_saver()
    # Keep the node alive servicing image_save requests until shutdown.
    rospy.spin()
| wuyang3/workspace | catkin_ws/src/poke_gazebo/scripts/poke_image_saver.py | poke_image_saver.py | py | 3,559 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv_bridge.CvBridge",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "rospy.Service",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "poke_gazebo.srv.imageSave",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "rospy.... |
38192341046 | import decimal
from django.conf import settings
from django.contrib import messages
from django.db.models import Sum
from django.shortcuts import render, redirect, get_object_or_404
from ..models import *
from ..forms import *
from ..models import *
from django.contrib.auth.decorators import login_required
@login_required(login_url='/user-authentication/')
def student_semester_course(request):
    """Render the active semester's course structure for the logged-in student."""
    # Staff accounts have no student registration; send them to logout.
    if request.user.is_staff:
        return redirect('KCHS:logout')
    active_semester = AcademicSemester.objects.filter(is_active=True).first()
    registration = Registration.objects.filter(
        student__user=request.user, semester=active_semester).first()
    structure = ProgrammeCourseStructure.objects.filter(
        programme=registration.student.programme,
        semester=active_semester.semester,
        level=registration.level)
    credit_total = structure.aggregate(total=Sum('credit'))
    context = {
        'semester': active_semester,
        'registration': registration,
        'structure': structure,
        'credit': credit_total,
    }
    return render(request, 'KCHS/student/student_course_semester.html', context)
def student_semester_payment(request):
    """Render the student's payments for the active semester plus outstanding dues.

    Staff users are redirected to logout since they have no registration.
    """
    if not request.user.is_staff:
        get_semester = AcademicSemester.objects.filter(is_active=True).first()
        get_student = Registration.objects.filter(
            student__user=request.user, semester=get_semester).first()
        get_payment = Payment.objects.filter(
            registration=get_student).order_by('account', '-id')
        # The latest payment row per bank carries the running "due" balance.
        get_direct_due = Payment.objects.filter(
            registration=get_student,
            account__bank="CRDB").order_by('-id').first()
        get_other_due = Payment.objects.filter(
            registration=get_student,
            account__bank="NMB").order_by('-id').first()
        try:
            get_debt = get_direct_due.due + get_other_due.due
        except (AttributeError, TypeError):
            # No payment row for one of the banks (first() returned None) or
            # a NULL due value: show an empty debt rather than crash.
            get_debt = ""
        context = {
            'semester': get_semester,
            'registration': get_student,
            'structure': get_payment,
            'due': get_debt,
        }
        return render(request, 'KCHS/student/student_semester_payment.html', context)
    else:
        return redirect('KCHS:logout')
def student_semester_result(request):
    """Render the student's exam results for the active semester."""
    # Staff accounts have no results to show; send them to logout.
    if request.user.is_staff:
        return redirect('KCHS:logout')
    active_semester = AcademicSemester.objects.filter(is_active=True).first()
    registration = Registration.objects.filter(
        student__user=request.user, semester=active_semester).first()
    results = SemesterResult.objects.filter(
        registration=registration,
        academic_semester=active_semester).order_by('academic_semester__semester')
    context = {
        'registration': registration,
        'result': results,
    }
    return render(request, 'KCHS/student/student_semester_exam_result.html',
                  context)
def student_programme_payment_structure(request):
    """Render fee structure and bank account details for the student's programme."""
    semester = AcademicSemester.objects.get(is_active=True)
    registration = get_object_or_404(Registration, student__user=request.user)
    programme = registration.student.programme
    payment_structure = PaymentStructure.objects.filter(
        programme=programme,
        level=registration.level,
        semester=semester.semester)
    # One display account per supported bank.
    accounts = {bank: BankAccount.objects.filter(bank=bank).first()
                for bank in ("CRDB", "NMB", "BOA")}
    fees_semester_one = FeeStructure.objects.filter(
        programme=programme, semester__number="1", level=registration.level)
    fees_semester_two = FeeStructure.objects.filter(
        programme=programme, semester__number="2", level=registration.level)
    context = {
        'semester': semester,
        'registration': registration,
        'programme': programme,
        'structure': payment_structure,
        'sem1': fees_semester_one,
        'sem2': fees_semester_two,
        'nmb': accounts["NMB"],
        'crdb': accounts["CRDB"],
        'boa': accounts["BOA"],
    }
    return render(request, 'KCHS/student/student_payment_structure.html', context)
def user_profile(request):
    """Render the profile page for the current user.

    Shows the Registration when one exists; otherwise falls back to the
    bare auth user so the page still renders for unregistered accounts.
    """
    try:
        get_student = get_object_or_404(Registration, student__user=request.user)
    except Exception:
        # get_object_or_404 raises Http404 when no Registration matches;
        # catch Exception (not bare except) so KeyboardInterrupt/SystemExit
        # still propagate.
        get_student = request.user
    context = {
        'student': get_student,
    }
    return render(request, 'KCHS/student/user_profile.html', context)
#
# def programme_course_structure(request, programme_name, level_name):
# get_programme = Programme.objects.get(name=programme_name)
# get_programme_level = get_object_or_404(Level, name=level_name)
#
# get_courses_semester_one = ProgrammeCourseStructure.objects.filter(programme=get_programme, semester__number="1",
# level=get_programme_level)
# get_courses_semester_two = ProgrammeCourseStructure.objects.filter(programme=get_programme, semester__number="2",
# level=get_programme_level)
# context = {
# 'programme': get_programme,
# 'level': get_programme_level,
# 'sem1': get_courses_semester_one,
# 'sem2': get_courses_semester_two,
#
# }
#
# return render(request, 'KCHS/academic/programme_course_structure.html', context)
#
#
# def course_assessment_group(request):
# get_group = GroupAssessment.objects.all().values('group', 'group__description', 'group__name').distinct()
# get_item = GroupAssessment.objects.all()
# print(get_group)
#
# context = {
# 'group': get_group,
# 'item': get_item
# }
#
# return render(request, 'KCHS/academic/group_list.html', context)
#
#
# def course_list(request):
# get_course = ProgrammeCourseStructure.objects.all()
# get_group = GroupAssessment.objects.all().values('group__id', 'group__description', 'group__name').distinct()
# get_item = GroupAssessment.objects.all().order_by('category')
#
# context = {
# 'course': get_course,
# 'group': get_group,
# 'item': get_item
# }
#
# return render(request, 'KCHS/academic/course_assessment_structure.html', context)
#
#
# def department_tutor_list(request):
# get_tutors = User.objects.all()
# # get_group = GroupAssessment.objects.all().values('group__id', 'group__description', 'group__name').distinct()
# # get_item = GroupAssessment.objects.all().order_by('category')
#
# context = {
# 'tutor': get_tutors,
#
# }
#
# return render(request, 'KCHS/academic/tutors_list.html', context)
def department_semester_student_list(request):
    """Render the full list of registered students for department staff."""
    context = {
        'student': Registration.objects.all(),
    }
    return render(request, 'KCHS/academic/student_list.html', context)
| luggiestar/kahama | KCHS/views/student_views.py | student_views.py | py | 8,479 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Sum",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 38,
"usage_type": "call"
},
{
"api_name":... |
21213270872 | import pandas as pd
from tqdm import tqdm
from Database_comparator.config_class import cfg
import os
from Bio.Blast.Applications import NcbiblastpCommandline
from Bio.Blast.Applications import NcbimakeblastdbCommandline
import Database_comparator.Fasta_maker as Fasta_maker
import Database_comparator.db_aligner as db_aligner
# Separator used between the source filename and sequence identifiers
# inside fasta headers.
FastaSeparator = "!"


class blast:
    """
    The blast class provides methods for performing BLAST (Basic Local Alignment Search Tool) searches
    and analyzing the results.

    It can create BLAST databases, perform BLAST searches with provided query sequences, and analyze the
    search results, including inserting matching results into the input DataFrame based on the provided
    configuration.
    """

    def __init__(self, config: cfg, aligner: db_aligner.aligner) -> None:
        self.config = config
        self.aligner = aligner

    # PUBLIC Blast algorithm
    def blast_database_info(self):
        """
        Print information about inserted databases for blast search.

        Note:
            This method prints information about the databases used for BLAST searches, including matrix, gap penalties,
            neighboring words threshold, and window size for multiple hits.
        """
        print("Inserted databases:")
        for database in self.config.in_blast_database:
            print(database)
        print("-----------------------------------------------------")
        print("Matrix: BLOSUM62")
        print("GaPenalties: Existence: 11, Extension: 1")
        print("Neighboring words threshold: 11")
        print("Window for multiple hits: 40")

    def blast_make_database(self, name="Database", force=False):
        """
        Create a BLAST database with the given name.

        Args:
            name (str): Name of the BLAST database.
            force (bool): Whether to overwrite an existing database and make a new fasta file for all databases (default is False).

        Note:
            This method creates a BLAST database, including the generation of a fasta file from the provided data
            configurations and specified name.
        """
        if not os.path.exists("Fasta_files"):
            os.mkdir("./Fasta_files")
        if not os.path.exists("./Query_files"):
            os.mkdir("./Query_files")
        fasta_file_name = "Fasta_files/BLAST_fasta_file.fasta"
        # Regenerate the combined fasta only when missing or forced.
        if not os.path.exists(fasta_file_name) or force:
            fasta_maker = Fasta_maker.Fasta_maker(
                data_dfs=[self.config.load_database(database_index=i) for i in range(len(self.config.data_info))],
                sequence_column_names=[self.config.data_info[i]["sequence_column_name"] for i in range(len(self.config.data_info))],
                identifiers=[self.config.data_info[i]["identifier_of_seq"] for i in range(len(self.config.data_info))],
                result_columns=[self.config.data_info[i]["results_column"] for i in range(len(self.config.data_info))],
                output_file_name=fasta_file_name,
                separator=FastaSeparator)
            fasta_maker.make_file()
        cline = NcbimakeblastdbCommandline(dbtype="prot", input_file=fasta_file_name, input_type="fasta", title=name, max_file_sz="2GB", out=self.config.blast_database_full_name)
        try:
            cline()
        except Exception as exc:
            # Chain the original makeblastdb failure for easier debugging.
            raise Exception("Error in creating blast database") from exc

    def blast_search_for_match_in_database(self, query=None):
        """
        Perform a BLAST search in a BLAST database.

        Args:
            query (str or None): Path to the query sequence file (default is None, uses default input query).

        Note:
            This method performs a BLAST search against the specified database, using the provided query sequence
            or the default input query.
        """
        print(f"Blasting against {self.config.blast_database_full_name}")
        if query is None:
            # Build the query fasta from the input DataFrame on demand.
            fasta_maker = Fasta_maker.Fasta_maker(
                data_dfs=[self.config.input_df],
                sequence_column_names=[self.config.input_file_info["sequence_column_name"]],
                identifiers=[],
                result_columns=[],
                output_file_name=self.config.blast_default_input_query,
                separator=FastaSeparator)
            fasta_maker.make_query()
            query = self.config.blast_default_input_query
        blast_in = query
        blast_out = self.config.blast_output_name
        blastp_cline = NcbiblastpCommandline(query=blast_in, db=self.config.blast_database_full_name, evalue=self.config.e_value, outfmt=self.config.blast_outfmt, out=blast_out)
        try:
            blastp_cline()
        except Exception as exc:
            # Chain the original blastp failure for easier debugging.
            raise Exception(f"Error in blasting against database {self.config.blast_database_full_name}") from exc
        print(f"Blasting done. Output: {blast_out}")
        print("-" * 200)

    def blast_search_and_analyze_matches_in_database(self, query=None) -> pd.DataFrame:
        """
        Perform a BLAST search in a BLAST database and then analyze the output data.

        Args:
            query (str or None): Path to the query sequence file (default is None, uses default input query).

        Note:
            This method combines BLAST search with the analysis of the search results, including inserting
            matching results into the input DataFrame.
        """
        self.blast_search_for_match_in_database(query)
        self.blast_analyze_output_data()
        return self.config.input_df.copy(deep=True)

    def blast_analyze_output_data(self) -> pd.DataFrame:
        """
        Analyze the output data from a BLAST search and insert results into the input DataFrame.

        Note:
            This method analyzes the output data from a previous BLAST search and inserts matching results
            into the input DataFrame using the specified aligner and configuration settings.
        """
        self.config.reset_before_analysis()
        # The outfmt string's first token is the format id; the rest name the
        # tab-separated columns of the BLAST output.
        columns_names = self.config.blast_outfmt.split()
        data = pd.read_csv(self.config.blast_output_name, sep="\t", names=columns_names[1:])
        data_df = pd.DataFrame(data).drop_duplicates(ignore_index=True)
        for i in tqdm(range(len(data_df)), desc="Analyzing BLAST output data with aligner", colour="green"):
            if self.aligner.align_sequences(data_df["qseq"][i], data_df["sseq"][i]):
                self.__insert_blast_results_to_input_df(data_df, i)
        return self.config.input_df.copy(deep=True)

    # PRIVATE Blast algorithm
    def __insert_blast_results_to_input_df(self, data_df: pd.DataFrame, index):
        """
        Insert BLAST results into the input DataFrame.

        Args:
            data_df (pd.DataFrame): Data frame containing BLAST results.
            index (int): Index of the result to be inserted.

        Note:
            This private method processes and inserts BLAST search results into the input DataFrame based on the
            specified configuration settings.
        """
        # Query ids were written as "seq<row index>" by the fasta maker.
        input_seq_index = int(str(data_df["qseqid"][index]).replace("seq", ""))
        labels = data_df["sseqid"][index].split(sep="!")
        sseq = str(data_df["sseq"][index])
        if len(labels) == 2:
            file_name, output_seq_identifier = labels[0], labels[1]
        else:
            file_name = labels[0]
            output_seq_identifier = labels[0]
        # Deduplicate semicolon-separated identifiers.
        output_seq_identifier = ";".join(set(output_seq_identifier.split(sep=";")))
        database_index = self.config.find_database_index(filename=file_name)
        if pd.isnull(self.config.input_df[self.config.data_info[database_index]["results_column"]][input_seq_index]):
            self.config.input_df.loc[input_seq_index, self.config.data_info[database_index]["results_column"]] = f"[seq: {sseq} identifier:{output_seq_identifier}]" + self.config.separator_of_results_in_input_df
        else:
            # Append to any result already recorded for this input row.
            self.config.input_df.loc[input_seq_index, self.config.data_info[database_index]["results_column"]] = \
                self.config.input_df[self.config.data_info[database_index]["results_column"]][input_seq_index] + f"[seq: {sseq} identifier:{output_seq_identifier}]" + self.config.separator_of_results_in_input_df
# ------------------------------------------------------------------------------------------------
| preislet/Database_comparator | Database_comparator/db_blast.py | db_blast.py | py | 8,294 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Database_comparator.config_class.cfg",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "Database_comparator.db_aligner.aligner",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "Database_comparator.db_aligner",
"line_number": 23,
"usa... |
74498162984 | import streamlit as st
from sklearn import datasets
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
# Chargement du dataset Iris et Random Forest Classifier
# Load the Iris dataset and fit a Random Forest classifier on it.
iris = datasets.load_iris()
x = iris.data
y = iris.target
foret=RandomForestClassifier()
foret.fit(x,y)
# Build the prediction app: title and header.
st.title("Application de prévision des fleurs d'iris")
st.header("Cette application predit la catégorie des fleurs d'iris")
# Collect sepal/petal lengths and widths via Streamlit sidebar sliders
# (min, max, default chosen from the Iris feature ranges).
st.sidebar.header("Les parametres d'entrée des fleurs d'iris")
sepal_length = st.sidebar.slider('Sepal length',4.3,7.9,5.3)
sepal_width = st.sidebar.slider('Sepal width',2.0,4.4,3.3)
petal_length = st.sidebar.slider('petal length',1.0,6.9,2.3)
petal_width = st.sidebar.slider('petal width',0.1,2.5,1.3)
st.sidebar.write('This App is creat by MP_NDIAYE')
data = {'sepal_length':sepal_length,
        'sepal_width':sepal_width,
        'petal_length':petal_length,
        'petal_width':petal_width}
# Single-row DataFrame matching the model's feature order.
df = pd.DataFrame(data,index=[0])
# Show the user-selected parameters in a subheader section.
st.subheader('On veut trouver la catégorie de cette fleur')
st.write(df)
# Predict button: display the predicted iris species name.
if(st.button("Submit")):
    prediction = foret.predict(df)
    st.write("la catégorie de la fleur d'iris est:",iris.target_names[prediction])
{
"api_name": "sklearn.datasets.load_iris",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 10,
"usage_type": "call"
},
{
... |
7152283168 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import ctypes
import sys
import bpy
import time
import io
import numpy as np
import timeit
from . holoplay_service_api_commands import *
from . import cbor
from . import cffi
hardwareVersion = None
def ensure_site_packages(packages):
    """Install any missing third-party modules into the user site-packages.

    Args:
        packages: list of (import name, pip name) tuples; may be empty or None.
    """
    if not packages:
        return

    import site
    import importlib.util

    # Make the user site visible before probing for the modules.
    sys.path.append(site.getusersitepackages())
    missing = [pkg[1] for pkg in packages
               if not importlib.util.find_spec(pkg[0])]
    if not missing:
        return

    import subprocess
    # Blender < 2.91 exposed its bundled interpreter under a different name.
    if bpy.app.version < (2, 91, 0):
        python_binary = bpy.app.binary_path_python
    else:
        python_binary = sys.executable
    subprocess.run([python_binary, '-m', 'ensurepip'], check=True)
    subprocess.run([python_binary, '-m', 'pip', 'install', *missing, "--user"],
                   check=True)
def send_message(sock, inputObj):
    """Send a CBOR-encoded command to the driver and return its decoded reply.

    Args:
        sock: socket already connected to the HoloPlay Service driver.
        inputObj: command dict; expected to carry 'cmd' and 'bin' entries
            (the 'bin' payload is the binary blob, e.g. an image).

    Returns:
        The driver's response decoded from CBOR into Python objects.
    """
    import pynng
    from . import cbor
    out = cbor.dumps(inputObj)
    print("---------------")
    print("Command (" + str(len(out)) + " bytes, "+str(len(inputObj['bin']))+" binary): ")
    print(inputObj['cmd'])
    print("---------------")
    sock.send(out)
    # Driver will respond with a CBOR-formatted error message / information packet
    response = sock.recv()
    print("Response (" + str(len(response)) + " bytes): ")
    response_load = cbor.loads(response)
    print(response_load)
    print("---------------")
    return response_load
def send_quilt(sock, quilt, duration=10):
    """Read a quilt from a Blender image datablock and display it on the device.

    Args:
        sock: socket connected to HoloPlay Service.
        quilt: Blender image datablock holding the rendered quilt.
        duration: seconds to display (only used in the log message here;
            presumably the driver handles the actual timing — TODO confirm).
    """
    print("===================================================")
    print("Sending quilt to HoloPlay Service")
    # Quilt geometry (tile counts, resolution, aspect) comes from the
    # add-on's window-manager properties.
    wm = bpy.context.window_manager
    aspect = wm.aspect
    W = wm.quiltX
    H = wm.quiltY
    vx = wm.tileX
    vy = wm.tileY
    vtotal = vx*vy
    from PIL import Image, ImageOps
    start_time = timeit.default_timer()
    print("Show a single quilt for " + str(duration) + " seconds, then wipe.")
    print("===================================================")
    # we need to get the data from a Blender image datablock because this is where we would put the image aquired from OpenGL
    # in the live view solution
    img0 = quilt
    # Actual size of the datablock overrides the configured W/H.
    W,H = img0.size
    # pre-allocate numpy array for better performance
    px0 = np.zeros(H*W*4, dtype=np.float32)
    # foreach_get is probably the fastest method to aquire the pixel values from a Blender image datablock
    img0.pixels.foreach_get(px0)
    print("Reading image from Blender image datablock: %.6f" % (timeit.default_timer() - start_time))
    # we need to convert the floats to integers from 0-255 for most image formats like PNG or BMP which can be send to HoloPlay Service
    # np.multiply(px0, 255, out=px0, casting="unsafe")
    px0 = px0 * 255
    pixels=px0.astype(np.uint8, order="C")
    pimg_time = timeit.default_timer()
    # for some reason the following only works when we create a PIL Image from a bytes-stream
    # so we need to convert the numpy array to bytes and read that
    pimg = Image.frombytes("RGBA", (W,H), pixels.tobytes())
    # the result is flipped, probably due to numpy, flip it back
    pimg_flipped = ImageOps.flip(pimg)
    print("Converting pixels to bytes-stream and flipping took: %.6f" % (timeit.default_timer() - pimg_time))
    # the idea is that we convert the PIL image to a simple file format HoloPlay Service / stb_image can read
    # and store it in a BytesIO object instead of disk
    output = io.BytesIO()
    pimg_flipped.convert('RGBA').save(output, 'BMP')
    # the contents of the BytesIO object becomes our blob we send to HoloPlay Service
    blob = output.getvalue()
    settings = {'vx': vx,'vy': vy,'vtotal': vtotal,'aspect': aspect}
    send_message(sock, show_quilt(blob, settings))
    print("Reading quilt from Blender image datablock and sending it to HoloPlay Service took: %.6f" % (timeit.default_timer() - start_time))
def send_quilt_from_np(sock, quilt, W=4096, H=4096, duration=10):
    """Send a quilt, supplied as a numpy pixel array, to HoloPlay Service.

    NOTE(review): the W/H defaults are immediately overwritten from the
    window-manager settings below, and ``duration`` is unused here --
    confirm whether those parameters are still needed.
    """
    print("===================================================")
    print("Sending quilt to HoloPlay Service")
    wm = bpy.context.window_manager
    aspect = wm.aspect
    W = wm.quiltX
    H = wm.quiltY
    vx = wm.tileX
    vy = wm.tileY
    vtotal = vx * vy
    from PIL import Image, ImageOps
    start_time = timeit.default_timer()
    # The live view hands us the pixel data directly as a numpy array.
    px0 = quilt
    # Convert the pixel values to 8-bit integers in C order for PIL.
    pixels = px0.astype(np.uint8, order="C")
    pimg_time = timeit.default_timer()
    # PIL only accepts this data reliably from a bytes stream, so convert
    # the numpy array to bytes first.
    pimg = Image.frombytes("RGBA", (W, H), pixels.tobytes())
    # The raw buffer arrives vertically flipped; flip it back.
    pimg_flipped = ImageOps.flip(pimg)
    print("Converting pixels to bytes-stream and flipping took: %.6f"
          % (timeit.default_timer() - pimg_time))
    # Encode as BMP -- a simple format HoloPlay Service / stb_image can
    # read -- into an in-memory buffer instead of touching the disk.
    output = io.BytesIO()
    pimg_flipped.convert('RGBA').save(output, 'BMP')
    # The buffer contents become the blob we send to HoloPlay Service.
    blob = output.getvalue()
    settings = {'vx': vx, 'vy': vy, 'vtotal': vtotal, 'aspect': aspect}
    send_message(sock, show_quilt(blob, settings))
    print("Reading quilt from numpy array and sending it to HoloPlay Service took in total: %.6f" % (timeit.default_timer() - start_time))
def init():
    """Connect to the HoloPlay driver socket and cache device settings.

    Populates the module-level globals and the Blender window-manager
    properties from the first connected Looking Glass device.  Returns
    False when the driver socket cannot be opened.
    """
    global hp
    global sock
    global numDevices
    global screenW
    global screenH
    global aspect
    global hardwareVersion
    print("Init Settings")
    start_time = timeit.default_timer()
    wm = bpy.context.window_manager
    ws_url = "ws://localhost:11222/driver"
    driver_url = "ipc:///tmp/holoplay-driver.ipc"
    ensure_site_packages([
        ("pynng", "pynng"),
        ("PIL", "Pillow"),
    ])
    import pynng
    # This script should work identically whether addr = driver_url or addr = ws_url
    addr = driver_url
    sock = pynng.Req0(recv_timeout=2000)
    try:
        sock.dial(addr, block=True)
    except:
        print("Could not open socket. Is driver running?")
        sock = None
        return False
    response = send_message(sock, {'cmd': {'info': {}}, 'bin': ''})
    if response != None:
        # create a dictionary with an index for this device
        devices = response['devices']
        if devices == []:
            print("No Looking Glass devices found")
        else:
            print("Reading settings from device")
            screenW = devices[0]['calibration']['screenW']['value']
            screenH = devices[0]['calibration']['screenH']['value']
            quiltX = devices[0]['defaultQuilt']['quiltX']
            quiltY = devices[0]['defaultQuilt']['quiltY']
            tileX = devices[0]['defaultQuilt']['tileX']
            tileY = devices[0]['defaultQuilt']['tileY']
            # not storing this in wm because we need to change this to
            # support multiple devices in the future
            hardwareVersion = devices[0]['hardwareVersion']
            aspect = screenW / screenH
            wm.screenW = screenW
            wm.screenH = screenH
            wm.aspect = aspect
            wm.quiltX = quiltX
            wm.quiltY = quiltY
            wm.tileX = tileX
            wm.tileY = tileY
            # The portrait device uses fixed view/quilt dimensions.
            if hardwareVersion == 'portrait':
                wm.viewX = 420
                wm.viewY = 560
                wm.quiltX = 3360
                wm.quiltY = 3360
            # temporarily support only one device due to the way we
            # globally store vars in the wm
            wm.numDevicesConnected = 1
            print("Number of devices found: " + str(wm.numDevicesConnected))
class looking_glass_reconnect_to_holoplay_service(bpy.types.Operator):
    """ Reconnects to Holoplay Service """
    bl_idname = "lookingglass.reconnect_to_holoplay_service"
    bl_label = "Reconnect to Service"
    bl_description = "Re-Initializes the connection to HoloPlay Service"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Simply re-run the module-level init() to re-open the socket.
        init()
        return {'FINISHED'}
| Looking-Glass/blenderLKG | looking_glass_tools/looking_glass_settings.py | looking_glass_settings.py | py | 9,262 | python | en | code | 34 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "site.getusersitepackages",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "importlib.util... |
6353439339 | import json
import psycopg2
def lambda_handler(event, context):
    """AWS Lambda entry point: fetch one row from ``reports`` by report_id.

    Expects ``event["report_id"]`` to be a string of digits; raises an
    Exception for malformed IDs or unknown reports.  Returns an
    API-Gateway-style response dict with the report serialized as JSON.
    """
    # NOTE(review): credentials are hard-coded in source; they should be
    # read from environment variables / Secrets Manager instead.
    db_host = "database-2.cjcukgskbtyu.ap-southeast-2.rds.amazonaws.com"
    db_user = "postgres"
    db_password = "sengpsql"  # Please don't hack us
    db_name = "database2"
    db_port = 5432
    conn = psycopg2.connect(
        host=db_host,
        user=db_user,
        password=db_password,
        dbname=db_name,
        port=db_port
    )
    try:
        report_id = event["report_id"]
        if not report_id.isdigit():
            raise Exception("Error: invalid report_id")
        curr = conn.cursor()
        # Bug fix: use a parameterized query instead of f-string
        # interpolation -- guards against SQL injection even if the
        # isdigit() check above is ever relaxed.
        curr.execute(
            """
            select *
            from reports
            where report_id = %s
            """,
            (report_id,),
        )
        report = curr.fetchone()
        if not report:
            raise Exception("Error: not a valid report ID")
    finally:
        # Bug fix: the connection was previously never closed.
        conn.close()
    report_json = {
        "report_id": report[0],
        "article_id": report[1],
        "report": report[2],
    }
    res = {
        "statusCode": 200,
        "headers": {
            "Content-Type": "application/json"
        },
        "body": json.dumps(report_json)
    }
    return res
| SENG3011-megAPIxels/interactive-outbreak-predictor | PHASE_1/API_SourceCode/parser/report.py | report.py | py | 1,078 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "psycopg2.connect",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 46,
"usage_type": "call"
}
] |
18973862172 | #!d:/python/python.exe
# *********************************************************************
# Program to submit the interim and full export publishing job.
# ---------------------------------------------------------------------
# Created: 2007-04-03 Volker Englisch
# *********************************************************************
import sys
import cdr
import os
import time
from argparse import ArgumentParser
from cdrapi import db
# Setting the host variable to submit the link for the error report
# -----------------------------------------------------------------
host = cdr.APPC
url = 'https://%s' % host

# Setting directory and file names
# --------------------------------
PUBPATH = os.path.join('d:\\cdr', 'publishing')
# PUBPATH = os.path.join('d:\\home', 'venglisch', 'cdr', 'publishing')

TIER = cdr.Tier().name
MAX_RETRIES = 10
RETRY_MULTIPLIER = 5.0
wait = 60    # number of seconds to wait between status checks

# The performance of the publishing job has greatly improved allowing
# us to cancel a running job much sooner if it fails to finish.
# Optionally overridden below once we know the publishing subset.
# --------------------------------------------------------------------
if cdr.isProdHost():
    waitTotal = 10800    # 3.0 hours
elif cdr.isDevHost():
    waitTotal = 10800    # 3.0 hours
else:
    waitTotal = 14400    # 4.0 hours

testMode = None
fullMode = None
session = cdr.login("cdroperator", cdr.getpw("cdroperator"))
pubSystem = 'Primary'
pubEmail = cdr.getEmailList('Operator Publishing Notification')
# ------------------------------------------------------------
# Custom argument parser (so we can log errors).
# ------------------------------------------------------------
class Parser(ArgumentParser):
    """Argument parser that logs usage errors before exiting."""

    JOBMODES = (
        ("interim", "run in interim (nightly) production mode"),
        ("export", "run in export (weekly) production mode"),
    )
    RUNMODES = (
        ("testmode", "run in TEST mode"),
        ("livemode", "run in LIVE mode"),
    )
    MODES = JOBMODES, RUNMODES

    def __init__(self):
        ArgumentParser.__init__(self)
        # One required, mutually exclusive group per mode pair; every
        # option also gets a single-letter long alias (e.g. --interim/--i).
        for mode_pair in self.MODES:
            group = self.add_mutually_exclusive_group(required=True)
            for name, description in mode_pair:
                flags = f"--{name}", f"--{name[0]}"
                group.add_argument(*flags, action="store_true",
                                   help=description)

    def error(self, message):
        # Record the problem in the log, then mirror argparse's behavior.
        LOGGER.error(message)
        self.print_help(sys.stderr)
        sys.stderr.write(f"\n{message}\n")
        sys.exit(1)
# ---------------------------------------------------------------
# Function to check the job status of the submitted publishing
# job.
# ---------------------------------------------------------------
def checkJobStatus(jobId):
    """Return the pub_proc row for publishing job `jobId`.

    Retries transient DB failures up to MAX_RETRIES with an increasing
    delay.  Returns (id, status, started, completed, messages).
    Raises an Exception when no row could be fetched.
    """
    # Defensive programming.
    tries = MAX_RETRIES
    # Bug fix: initialize row so that exhausting every retry raises the
    # intended Exception below instead of an UnboundLocalError (this
    # mirrors the `row = cursor = None` guard in statusPubJobFailure).
    row = None
    while tries:
        try:
            conn = db.connect(timeout=300)
            cursor = conn.cursor()
            cursor.execute("""\
                SELECT id, status, started, completed, messages
                  FROM pub_proc
                 WHERE id = %d""" % int(jobId))
            row = cursor.fetchone()
            # We can stop trying now, we got it.
            tries = 0
        except Exception:
            LOGGER.exception("*** Failure connecting to DB ***")
            LOGGER.info("*** Unable to check status for PubJob%s", jobId)
            waitSecs = (MAX_RETRIES + 1 - tries) * RETRY_MULTIPLIER
            LOGGER.info("    RETRY: %d retries left; waiting %f seconds",
                        tries, waitSecs)
            time.sleep(waitSecs)
            tries -= 1
    if not row:
        raise Exception("*** (3) Tried to connect %d times. No Pub Job-ID." %
                        MAX_RETRIES)
    return row
# ---------------------------------------------------------------
# Function to set the job status to failure.
# ---------------------------------------------------------------
def statusPubJobFailure(jobId):
    """Mark publishing job `jobId` as 'Failure' in pub_proc.

    On occasion, the publishing job fails to finish (network connectivity
    issues?) and gets cancelled once the max time allowed is reached.
    Setting the status to 'Failure' keeps a stale 'In process' row from
    blocking new jobs, since only one job per job type may run at a time.

    If testing this for a job that has *not* actually failed, note that
    the status is set to 'Failure' but may be updated again -- possibly
    to 'Success' -- when the not-really-failed job finishes.
    """
    # Defensive programming.
    tries = MAX_RETRIES
    row = cursor = None
    while tries:
        try:
            conn = db.connect(timeout=300)
            cursor = conn.cursor()
            cursor.execute("""\
                SELECT id, status, started, completed, messages
                  FROM pub_proc
                 WHERE id = %d""" % int(jobId))
            row = cursor.fetchone()
            LOGGER.info("Job%d status: %s", row[0], row[1])
            # We can stop trying now, we got it.
            tries = 0
        except Exception:
            LOGGER.exception("*** Failure connecting to DB ***")
            LOGGER.warning("*** Unable to set job status to 'Failure'.")
            LOGGER.info("*** PubJob%d", jobId)
            waitSecs = (MAX_RETRIES + 1 - tries) * RETRY_MULTIPLIER
            LOGGER.info("    RETRY: %d retries left; waiting %f seconds",
                        tries, waitSecs)
            time.sleep(waitSecs)
            tries -= 1
    if cursor is None:
        raise Exception("Unable to connect to the database")
    # Set the job status to 'Failure' rather than leaving it as
    # 'In process', so a new job won't fail until the status has been
    # manually updated.
    try:
        cursor.execute("""\
            UPDATE pub_proc
               SET status = 'Failure'
             WHERE id = %d
               AND status = 'In process'""" % int(jobId))
        conn.commit()
    except Exception:
        LOGGER.exception("*** Failure updating job status ***")
        LOGGER.info("*** Manually set the job status to 'Failure'.")
        LOGGER.info("*** PubJob%s", jobId)
    if not row:
        raise Exception("*** (3) Tried to connect %d times. No Pub Job-ID." %
                        MAX_RETRIES)
    return row
# --------------------------------------------------------------
# Function to find the job ID of the push job.
# --------------------------------------------------------------
def getPushJobId(jobId):
    """Return the ID of the push job submitted after publishing `jobId`.

    Waits briefly, then polls pub_proc for a newer *_Export push job,
    retrying transient DB failures.  Raises when no push job appears
    (e.g. because another push job is still pending).
    """
    # Defensive programming.
    tries = MAX_RETRIES
    time.sleep(15)
    while tries:
        try:
            conn = db.connect()
            cursor = conn.cursor()
            cursor.execute("""\
                SELECT id, status, started, completed
                  FROM pub_proc
                 WHERE id > %d
                   AND pub_system = 178
                   AND (pub_subset LIKE '%%_Interim-Export'
                        OR
                        pub_subset LIKE '%%_Export')
                """ % int(jobId))
            row = cursor.fetchone()
            # If the SELECT returns nothing a push job was not submitted
            # because another job is still pending.
            # Otherwise the push job may already have completed.
            if row is None:
                LOGGER.error("*** Error - No push job waiting. "
                             "Check for pending job")
                cursor.execute("""\
                    SELECT id, messages
                      FROM pub_proc
                     WHERE id = %d""" % int(jobId))
                row = cursor.fetchone()
                LOGGER.info("%s", row[1])
                raise Exception("No push job waiting")
            # We can stop trying now, we got it.
            tries = 0
        except Exception:
            LOGGER.exception("*** Failure connecting to DB ***")
            LOGGER.info("*** Unable to find status for PushJob%s", jobId)
            waitSecs = (MAX_RETRIES + 1 - tries) * RETRY_MULTIPLIER
            LOGGER.info("    RETRY: %d retries left; waiting %f seconds",
                        tries, waitSecs)
            time.sleep(waitSecs)
            tries -= 1
    if not row:
        raise Exception("*** (1) Tried to connect %d times. No Push Job-ID." %
                        MAX_RETRIES)
    return row[0]
# ---------------------------------------------------------------------
# Function to send an email when the job fails instead of silently
# exiting.
# ---------------------------------------------------------------------
def sendFailureMessage(header="*** Error ***", body=""):
    """Email the test-publishing notification list about a failed job."""
    emailDL = cdr.getEmailList('Test Publishing Notification')
    subject = header
    if not body:
        body = """
The publishing job failed. Please check the log files.
"""
    opts = dict(subject=subject, body=body)
    cdr.EmailMessage(cdr.OPERATOR, emailDL, **opts).send()
# ---------------------------------------------------------------------
# Function to check if an Interim job is already underway (maybe it runs
# longer than 24 hours or it has been started manually).
# Note:
# We may want to change the SQL query to make sure a weekly export can
# not be started if a nightly job hasn't finished yet.
# ---------------------------------------------------------------------
def checkPubJob():
    """Return the pub_proc row of any Export job still running, or 0."""
    # Defensive programming.
    tries = MAX_RETRIES
    while tries:
        try:
            conn = db.connect()
            cursor = conn.cursor()
            cursor.execute("""\
                SELECT id, pub_subset, status, started, completed
                  FROM pub_proc
                 WHERE status not in ('Failure', 'Success')
                   AND pub_system = 178
                   AND pub_subset LIKE '%%Export' """)
            row = cursor.fetchone()
            return row or 0
        except Exception:
            LOGGER.exception("*** Failure checking for running jobs ***")
            waitSecs = (MAX_RETRIES + 1 - tries) * RETRY_MULTIPLIER
            LOGGER.info("    RETRY: %d retries left; waiting %f seconds",
                        tries, waitSecs)
            time.sleep(waitSecs)
            tries -= 1
    raise Exception(f"*** (2) Giving up after {MAX_RETRIES:d} times.")
# ---------------------------------------------------------------------
# Instantiate the Log class
# ---------------------------------------------------------------------
LOGGER = cdr.Logging.get_logger("PubJob", console=True)
LOGGER.info("SubmitPubJob - Started")
LOGGER.info('Arguments: %s', sys.argv)

opts = Parser().parse_args()
testMode = opts.testmode
fullMode = opts.export

# Based on the command line parameter passed we are submitting an
# interim publishing job or a full export
# ---------------------------------------------------------------
pubSubset = 'Export' if fullMode else 'Interim-Export'

# A tier-specific control value may override the default timeout.
override = cdr.getControlValue("Publishing", f"{pubSubset}-wait-seconds")
try:
    waitTotal = int(override)
except Exception:
    pass
try:
    # Before we start we need to check if a publishing job is already
    # underway. It could be in the process of publishing or pushing.
    # We do not allow two jobs of the same job type to run simultaneously.
    # Also, if a publishing job ran but the push job failed the
    # initiated push job would fail with a message 'Push job pending'.
    # ---------------------------------------------------------------
    LOGGER.info("Checking job queue ...")

    # checkPubJob will exit if another job is already running
    # -------------------------------------------------------
    currentJobs = checkPubJob()
    if currentJobs:
        LOGGER.error("\n%s job is still running.", pubSubset)
        LOGGER.error("Job%s status: %s", currentJobs[0], currentJobs[2])
        LOGGER.error("Job%s type  : %s", currentJobs[0], currentJobs[1])
        raise Exception("Job%s still in process (%s: %s)" %
                        (currentJobs[0], pubSubset, currentJobs[2]))
    LOGGER.info("  OK to submit")

    # Submitting publishing job.  If an integer job ID is returned
    # we continue.  Otherwise, submitting the job failed and we exit.
    # ---------------------------------------------------------------
    LOGGER.info("Submitting publishing job ...")
    submit = cdr.publish(session, pubSystem, pubSubset, email=pubEmail)
    if submit[0] is None:
        LOGGER.error("*** Failure starting publishing job ***")
        LOGGER.error("%s", submit[1])
        sys.exit(1)
    else:
        LOGGER.info("Pub job started as Job%s", submit[0])
        LOGGER.info("Waiting for publishing job to complete ...")

    # We started the publishing job.  Now we need to wait until the
    # publishing (and pushing) is complete before we exit the program,
    # otherwise the following SQL Server Agent steps would start without
    # the data being ready.  Checking the status every minute.
    # ---------------------------------------------------------
    done = 0
    counter = 0
    while not done:
        time.sleep(wait)
        counter += 1
        jobInfo = checkJobStatus(submit[0])
        status = jobInfo[1]
        messages = jobInfo[4]

        # Don't print every time we're checking (every 15 minutes)
        # ---------------------------------------------------------
        if counter % 15 == 0:
            LOGGER.info("    Status: %s (%d sec)", status, counter*wait)

        if counter * wait > waitTotal:
            hours = waitTotal / (60 * 60)
            LOGGER.error("*** Publishing job failed to finish!!!")
            LOGGER.error("*** Completion exceeded maximum time allowed")
            LOGGER.error("*** Cancelled after %s hours", hours)
            subject = "Publishing Failure: Max time exceeded"
            msgBody = """
The publishing job failed. It did not finish within the maximum time
allowed.
"""
            sendFailureMessage(subject, msgBody)
            statusPubJobFailure(submit[0])
            sys.exit(1)

        # Once the publishing job completed with status Success
        # we need to find the push job and wait for it to finish.
        # We will continue after both jobs completed with Success.
        # --------------------------------------------------------
        if status in ('Verifying', 'Success'):
            LOGGER.info("Publishing job started at %s", jobInfo[2])
            LOGGER.info("       and completed at %s", jobInfo[3])
            try:
                pushId = getPushJobId(submit[0])
                LOGGER.info("Push job started as Job%s", pushId)
            except Exception:
                LOGGER.exception("*** Failed to submit Push job for Job%s",
                                 submit[0])
                sys.exit(1)

            pdone = 0
            pcounter = 0
            # Waiting for the push job to finish
            # -----------------------------------
            while not pdone:
                time.sleep(wait)
                pcounter += 1
                jobInfo = checkJobStatus(pushId)
                pstatus = jobInfo[1]
                if pcounter % 15 == 0:
                    args = pstatus, pcounter * wait
                    LOGGER.info("    Status: %s (%d sec)", *args)
                if pstatus in ('Verifying', 'Success'):
                    pdone = 1
                    LOGGER.info("   Pushing job started at %s", jobInfo[2])
                    LOGGER.info("          and completed at %s", jobInfo[3])
                elif pstatus == 'Failure':
                    LOGGER.error("*** Push job failed at %s", jobInfo[3])
                    LOGGER.info("    Status: %s", pstatus)
                    LOGGER.info("%s", jobInfo[4])
                    sys.exit(1)
                else:
                    pdone = 0
            done = 1
        elif status == 'Failure':
            LOGGER.error("*** Error - Publication job failed")
            LOGGER.error("... %s", messages[-500:])
            subj = "*** Publishing Failure: The current job did not succeed!"
            msgBody = """
The publishing job started but did not complete successfully.
See logs below:
---------------
%s
""" % messages[-500:]
            sendFailureMessage(subj, msgBody)
            sys.exit(1)
        else:
            done = 0

    try:
        # Submitting the email notification including the error report.
        # The mail is sent to two different groups depending on whether
        # it's a nightly or a weekly publishing job.
        # ------------------------------------------------------------
        if fullMode:
            emailDL = cdr.getEmailList('Weekly Publishing Notification')
            addSubj = 'Weekly'
        else:
            emailDL = cdr.getEmailList('Nightly Publishing Notification')
            addSubj = 'Nightly'
        # If we're not running in production we want to avoid sending
        # these email messages to the users, so redirect to the
        # developers/testers list of recipients.
        # -----------------------------------------------------------
        if not TIER == 'PROD':
            emailDL = cdr.getEmailList('Test Publishing Notification')
        args = TIER, addSubj
        subject = '[%s] Status and Error Report for %s Publishing' % args
        emailDL.sort()
        if not len(emailDL):
            emailDL = cdr.getEmailList("Developers Notification")
            subject = '*** DL Missing *** %s' % subject
            LOGGER.warning('*** Warning: No Email DL found')
        message = """\
Status and Error reports for the latest %s publishing/push jobs:
Publishing Job Report:
%s/cgi-bin/cdr/PubStatus.py?id=%s
Push Job Report:
%s/cgi-bin/cdr/PubStatus.py?id=%s
""" % (addSubj.lower(), url, submit[0], url, pushId)
        opts = dict(subject=subject, body=message)
        cdr.EmailMessage(cdr.OPERATOR, emailDL, **opts).send()
        LOGGER.info("Submitting Email: OK")
    except Exception:
        LOGGER.exception("*** Error sending email ***")
        raise
except Exception as arg:
    LOGGER.exception("*** Standard Failure")
    subject = '[%s] *** SubmitPubJob.py - Standard Failure' % TIER
    msgBody = "The publishing job failed: %s" % arg
    sendFailureMessage(subject, msgBody)
    sys.exit(0)
| NCIOCPL/cdr-publishing | Publishing/SubmitPubJob.py | SubmitPubJob.py | py | 18,579 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cdr.APPC",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "cdr.Tier",
"line_number":... |
18735035904 | import scrapy
from Lab5_1.SeleniumRequest import SeleniumRequest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from Lab5_1.items import Lab51Item
class AirbnbSpider(scrapy.Spider):
    """Spider that collects listing URLs from the airbnb.com.ua front page."""

    name = "airbnb"
    allowed_domains = ["airbnb.com.ua"]
    start_urls = ["https://www.airbnb.com.ua/"]

    def start_requests(self):
        # The pages are JS-rendered, so fetch them through Selenium.
        for start_url in self.start_urls:
            yield SeleniumRequest(
                url=start_url,
                callback=self.parse,
                wait_time=10,
            )

    def parse(self, response):
        # Each listing card carries its link in an <a class="l1j9v1wn">.
        for card in response.css('div.c4mnd7m'):
            yield Lab51Item(url=card.css('a.l1j9v1wn::attr(href)').get())
| Ivan7281/Lab-Data-Scraping | Lab5_1/Lab5_1/spiders/airbnb.py | airbnb.py | py | 762 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "Lab5_1.SeleniumRequest.SeleniumRequest",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "Lab5_1.items.Lab51Item",
"line_number": 24,
"usage_type": "call"
}
] |
37412163105 | from copy import deepcopy
import h5py
import numpy as np
import pytest
import six
from ...util.functions import virtual_file
from .. import (CartesianGrid,
CylindricalPolarGrid,
SphericalPolarGrid,
AMRGrid,
OctreeGrid)
ALL_GRID_TYPES = ['car', 'sph', 'cyl', 'amr', 'oct']
def exc_msg(exc):
    """Extract the message from a pytest ExceptionInfo's wrapped value."""
    value = exc.value
    if isinstance(value, six.string_types):
        # Old-style string exceptions.
        return value
    if type(value) is tuple:
        return value[0]
    return value.args[0]
class TestView(object):
    """Write/read round-trip tests covering every supported grid type."""

    def setup_method(self, method):
        # Set up one populated grid instance per grid type key.
        self.grid = {}
        self.grid['car'] = CartesianGrid([-1., 1.],
                                         [-2., 2.],
                                         [-3., 3.])
        self.grid['cyl'] = CylindricalPolarGrid([0., 1.],
                                                [-1., 1.],
                                                [0., 2. * np.pi])
        self.grid['sph'] = SphericalPolarGrid([0., 1.],
                                              [0., np.pi],
                                              [0., 2. * np.pi])
        self.grid['amr'] = AMRGrid()
        level = self.grid['amr'].add_level()
        grid = level.add_grid()
        grid.xmin, grid.xmax = -1., 1.
        grid.ymin, grid.ymax = -1., 1.
        grid.zmin, grid.zmax = -1., 1.
        grid.nx, grid.ny, grid.nz = 8, 8, 8
        refined = [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
        self.grid['oct'] = OctreeGrid(0., 0., 0., 10., 10., 10.,
                                      np.array(refined).astype(bool))

        # The matching (uninstantiated) grid classes.
        self.grid_empty = {}
        self.grid_empty['car'] = CartesianGrid
        self.grid_empty['cyl'] = CylindricalPolarGrid
        self.grid_empty['sph'] = SphericalPolarGrid
        self.grid_empty['amr'] = AMRGrid
        self.grid_empty['oct'] = OctreeGrid

        # Initial density arrays consistent with each grid's geometry.
        self.density = {}
        self.density['car'] = np.array([[[1.]]])
        self.density['cyl'] = np.array([[[1.]]])
        self.density['sph'] = np.array([[[1.]]])
        amr_q = deepcopy(self.grid['amr'])
        amr_q.levels[0].grids[0].quantities['density'] = np.ones((8, 8, 8))
        self.density['amr'] = amr_q['density']
        self.density['oct'] = np.ones(len(refined))

    @pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
    def test_write_read_empty(self, grid_type):
        # A grid with no quantities round-trips with n_dust unset.
        g = self.grid[grid_type]
        f = virtual_file()
        g.write(f)
        h = self.grid_empty[grid_type]()
        h.read(f)
        f.close()
        assert h.n_dust is None

    @pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
    def test_write_read_single(self, grid_type):
        # One dust population round-trips and stays a list-valued quantity.
        g = self.grid[grid_type]
        f = virtual_file()
        g['density'] = []
        g['density'].append(self.density[grid_type])
        g.write(f)
        h = self.grid_empty[grid_type]()
        h.read(f)
        f.close()
        assert h.n_dust == 1
        if grid_type == 'amr':
            assert type(h.levels[0].grids[0].quantities['density']) is list
        else:
            assert type(h.quantities['density']) is list

    @pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
    def test_write_read_double(self, grid_type):
        # Two dust populations for a single quantity.
        g = self.grid[grid_type]
        f = virtual_file()
        g['density'] = []
        g['density'].append(self.density[grid_type])
        g['density'].append(self.density[grid_type])
        g.write(f)
        h = self.grid_empty[grid_type]()
        h.read(f)
        f.close()
        assert h.n_dust == 2
        if grid_type == 'amr':
            assert type(h.levels[0].grids[0].quantities['density']) is list
        else:
            assert type(h.quantities['density']) is list

    @pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
    def test_write_read_double_multiple(self, grid_type):
        # Two dust populations across two distinct quantities.
        g = self.grid[grid_type]
        f = virtual_file()
        g['density'] = []
        g['density'].append(self.density[grid_type])
        g['density'].append(self.density[grid_type])
        g['energy'] = []
        g['energy'].append(self.density[grid_type])
        g['energy'].append(self.density[grid_type])
        g.write(f)
        h = self.grid_empty[grid_type]()
        h.read(f)
        f.close()
        assert h.n_dust == 2
        if grid_type == 'amr':
            assert type(h.levels[0].grids[0].quantities['density']) is list
            assert type(h.levels[0].grids[0].quantities['energy']) is list
        else:
            assert type(h.quantities['density']) is list
            assert type(h.quantities['energy']) is list

    @pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
    def test_write_read_type_mismatch(self, grid_type):
        # Corrupt the stored grid type and check the reader notices.
        g = self.grid[grid_type]
        f = virtual_file()
        g['density'] = []
        g['density'].append(self.density[grid_type])
        g.write(f)
        f['Geometry'].attrs['grid_type'] = 'invalid'.encode('utf-8')
        h = self.grid_empty[grid_type]()
        with pytest.raises(Exception) as exc:
            h.read(f)
        if grid_type == 'car':
            assert exc.value.args[0] == "Grid is not cartesian"
        elif grid_type == 'cyl':
            assert exc.value.args[0] == "Grid is not cylindrical polar"
        elif grid_type == 'sph':
            assert exc.value.args[0] == "Grid is not spherical polar"
        elif grid_type == 'amr':
            assert exc.value.args[0] == "Grid is not an AMR grid"
        elif grid_type == 'oct':
            assert exc.value.args[0] == "Grid is not an octree"

    @pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
    def test_write_read_hash_mismatch(self, grid_type):
        # Corrupt the stored geometry hash and check the reader notices.
        g = self.grid[grid_type]
        f = virtual_file()
        g['density'] = []
        g['density'].append(self.density[grid_type])
        g.write(f)
        f['Geometry'].attrs['geometry'] = 'a4e2805a72dfcf01b2fd94da0be32511'.encode('utf-8')
        h = self.grid_empty[grid_type]()
        with pytest.raises(Exception) as exc:
            h.read(f)
        assert exc.value.args[0] == "Calculated geometry hash does not " \
                                    "match hash in file"

    @pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
    def test_write_read_groups_exist(self, grid_type):
        # Writing must cope with the HDF5 groups already being present.
        g = self.grid[grid_type]
        f = virtual_file()
        f.create_group('Geometry')
        f.create_group('Quantities')
        g['density'] = []
        g['density'].append(self.density[grid_type])
        g.write(f)
        h = self.grid_empty[grid_type]()
        h.read(f)
        assert h.n_dust == 1
{
"api_name": "six.string_types",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"lin... |
38676635360 | # -*- coding: utf-8 -*-
"""
Created on Thu May 4 10:51:36 2017
@author: FunkyBlack
"""
import numpy as np
import matplotlib.pyplot as plt
def obj_fun(A, b, x):
    """Evaluate the test function

        f(x) = x^T A x + exp(-b^T x) + exp(-x^T x)

    for column vectors b, x and square matrix A; returns a 1x1 array.
    """
    quadratic = np.dot(np.dot(x.T, A), x)
    exp_linear = np.exp(np.dot(-b.T, x))
    exp_radial = np.exp(np.dot(-x.T, x))
    return quadratic + exp_linear + exp_radial
def grad_fun(A, b, x):
    """Gradient of obj_fun: 2 A x - exp(-b^T x) b - 2 exp(-x^T x) x."""
    g = 2 * np.dot(A, x)
    g = g - np.exp(np.dot(-b.T, x)) * b
    g = g - 2 * np.exp(np.dot(-x.T, x)) * x
    return g
def hess_fun(A, b, x):
    """Hessian of obj_fun:

        2 A + exp(-b^T x) b b^T - 2 exp(-x^T x) I + 4 exp(-x^T x) x x^T

    Generalized: the identity's dimension now follows len(x) instead of
    the previously hard-coded np.eye(5), so any problem size works.
    """
    n = x.shape[0]
    exp_bx = np.exp(np.dot(-b.T, x))
    exp_xx = np.exp(np.dot(-x.T, x))
    return (2 * A
            + exp_bx * np.dot(b, b.T)
            - 2 * exp_xx * np.eye(n)
            + 4 * exp_xx * np.dot(x, x.T))
def LevMar(A, b):
    """Minimize obj_fun via the Levenberg-Marquardt damping strategy.

    Plots the objective value per iteration and returns (x*, f(x*)).
    """
    Max_iter = 100
    epsilon = 1e-8
    x = np.array([[0], [0], [0], [0], [0]])
    mu = 0.01
    x_plot = []
    y_plot = []
    for i in range(Max_iter):
        f_x = obj_fun(A=A, b=b, x=x)
        g_x = grad_fun(A=A, b=b, x=x)
        G_x = hess_fun(A=A, b=b, x=x)
        x_plot.append(i)
        y_plot.append(f_x[0])
        # Converged when the gradient is (near) zero.
        if np.sum(np.abs(g_x)) < epsilon:
            break
        # Increase mu until the damped Hessian is positive definite.
        EigVal = np.linalg.eigvals(G_x + mu * np.eye(5))
        while not np.all(EigVal > 0):
            mu = 4 * mu
            EigVal = np.linalg.eigvals(G_x + mu * np.eye(5))
        # Damped Newton step.
        s = np.dot(-np.linalg.inv(G_x + mu * np.eye(5)), g_x)
        f_x_new = obj_fun(A=A, b=b, x=x + s)
        # Gain ratio: actual vs. predicted (quadratic-model) reduction.
        delta_q = np.dot(g_x.T, s) + 0.5 * np.dot(np.dot(s.T, G_x), s)
        r = (f_x_new - f_x) / delta_q
        if r < 0.25:
            mu = 4 * mu
        elif r > 0.75:
            mu = mu / 2
        # Only accept the step when it actually reduced the objective.
        if r > 0:
            x = x + s
    plt.plot(x_plot, y_plot, 'ko', x_plot, y_plot, 'r')
    xlims = plt.xlim()
    ylims = plt.ylim()
    plt.xlim(xlims[0]-0.5, xlims[1]+0.5)
    plt.ylim(ylims[0]-0.1, ylims[1]+0.1)
    plt.title('Levenberg-Marquardt Method')
    plt.xlabel('Iteration')
    plt.ylabel('Objective value')
    plt.text(3.5, 1.8, '$f(x)=x^{T}Ax+e^{-b^{T}x}+e^{-x^{T}x}$', fontsize=15)
    return x, f_x[0]
if __name__ == '__main__':
    # 5x5 test problem from the assignment.
    A = np.array([[5, -1, 2, 0, 0],
                  [-1, 4, 1, -1, 0],
                  [2, 1, 6, 4, 0],
                  [0, -1, 4, 7, 0],
                  [0, 0, 0, 0, 0.5]])
    b = np.array([[2], [1], [3], [5], [10]])
    x_star, f_x_star = LevMar(A=A, b=b)
    print("x* = ", x_star, "\nvalue = ", f_x_star)
plt.savefig('LM_iterations.png') | FunkyBlack/CSMath-homeworks | Homework4/levenberg_marquardt.py | levenberg_marquardt.py | py | 2,756 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.dot",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 23,
... |
18431033624 | import copy
import random
import math
from fastapi import HTTPException
from nltk.tree import Tree
from schemas.paraphrasing_schemas import TreeParaphraseList, TreeParaphrase
class TreeParaphrasingService:
    """Generates paraphrases of a sentence by permuting coordinated NP
    subtrees of its constituency parse."""

    # Consecutive shuffles yielding no new permutation before giving up.
    _MAX_STAGNANT_SHUFFLES = 200

    def __init__(self, tree_str: str):
        self.tree = Tree.fromstring(tree_str)

    def count_tree_permutations(self, tree: Tree) -> int:
        """Return an UPPER BOUND on the number of distinct orderings.

        NOTE: this over-counts -- it applies a factorial wherever a node
        has >= 2 NP children, even though shuffle_np_subtrees only
        reorders them when the node itself is an NP with a CC/comma
        sibling, and it counts duplicate subtrees as distinct.
        """
        if tree.height() <= 3:
            return 1
        counter = sum(1 for elem in tree if elem.label() == "NP")
        if counter >= 2:
            return math.factorial(counter) * math.prod(
                self.count_tree_permutations(elem)
                for elem in tree if elem.label() == "NP"
            )
        return math.prod(self.count_tree_permutations(elem) for elem in tree)

    def shuffle_np_subtrees(self, tree: Tree) -> None:
        """In place, randomly reorder the NP children of coordinated NP
        nodes (those with >= 2 NP children plus a CC or comma), then
        recurse into every child."""
        # Trees with no children or leaves only cannot be shuffled.
        if tree.height() < 3:
            return
        if tree.label() == "NP":
            np_subtrees = [branch for branch in tree if branch.label() == "NP"]
            if len(np_subtrees) >= 2:
                labels = [child.label() for child in tree[0:]]
                if labels.count('NP') >= 2 and ('CC' in labels or ',' in labels):
                    random.shuffle(np_subtrees)
                    for i, val in enumerate(tree):
                        if val.label() == "NP":
                            tree[i] = np_subtrees.pop()
        for child in tree:
            self.shuffle_np_subtrees(child)

    def find_all_tree_permutations(self) -> list[str]:
        """Collect distinct shuffled renderings of the tree.

        Bug fix: count_tree_permutations is only an upper bound, so the
        previous `while len(found) < count` loop could spin forever when
        some counted permutations are unreachable (duplicate NP subtrees,
        NP groups without CC/comma, non-NP parents).  We now also stop
        after _MAX_STAGNANT_SHUFFLES consecutive shuffles that produce
        nothing new.
        """
        tree_permutations: set[str] = set()
        upper_bound = self.count_tree_permutations(tree=self.tree)
        stagnant = 0
        while (len(tree_permutations) < upper_bound
                and stagnant < self._MAX_STAGNANT_SHUFFLES):
            new_tree = copy.deepcopy(self.tree)
            self.shuffle_np_subtrees(new_tree)
            new_permutation = " ".join(str(new_tree).split())
            if new_permutation in tree_permutations:
                stagnant += 1
            else:
                tree_permutations.add(new_permutation)
                stagnant = 0
        return list(tree_permutations)

    def paraphrase_syntactic_tree(self, limit: int) -> TreeParaphraseList:
        """Return up to `limit` paraphrase trees; 404 when none exist."""
        tree_permutations = self.find_all_tree_permutations()[:limit]
        if not tree_permutations:
            raise HTTPException(status_code=404, detail='No paraphrases exist for this tree')
        paraphrase_results = [TreeParaphrase(tree=tree_permutation)
                              for tree_permutation in tree_permutations]
        return TreeParaphraseList(paraphrases=paraphrase_results)
| Cerne13/paraphrasing-test-task | services/paraphrasing_service.py | paraphrasing_service.py | py | 2,400 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.tree.Tree.fromstring",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "nltk.tree.Tree",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "nltk.tree.Tree",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "math.factoria... |
39553984143 | import numpy as np
import cv2
import os
import time
from process_img import grab_screen
from direct_keys import PressKey, ReleaseKey, W, S, A, D
from getkeys import key_check
from tensorflow.keras.models import load_model
from global_var import *
def straight():
    """Drive forward: hold W, release both steering keys."""
    PressKey(W)
    for steer_key in (A, D):
        ReleaseKey(steer_key)
def left():
    """Steer left: hold A, release forward and right."""
    PressKey(A)
    for other_key in (W, D):
        ReleaseKey(other_key)
def right():
    """Steer right: hold D, release forward and left."""
    PressKey(D)
    for other_key in (W, A):
        ReleaseKey(other_key)
def predictions_to_direct(predictions):
    """Translate a model prediction vector into a keypress action.

    Class indices: 0 = left, 1 = straight, 2 = right; any other index
    (impossible with a 3-class head) is ignored, as in the if/elif original.
    """
    chosen = np.argmax(predictions[0])
    actions = {0: left, 1: straight, 2: right}
    if chosen in actions:
        actions[chosen]()
def main():
    """Run the self-driving loop: grab frames, predict a direction, press keys.

    Toggled on/off with the 'Y' key; starts paused after a 3-second countdown.
    Quit with 'q' while the preview window has focus.
    """
    paused = True
    model = load_model('my_model.h5')
    # countdown so the user can focus the game window
    for i in range(3, 0, -1):
        print(i)
        time.sleep(1)
    while(True):
        if not paused:
            last_time = time.time()
            screen, lines = grab_screen()
            # crop the top 200 rows (HUD/sky) and normalise to [0, 1]
            screen = screen[200:] / 255.0
            screen = cv2.resize(screen, (FINAL_WIDTH, FINAL_HEIGHT))
            # reshape to the (batch, H, W, channels) layout the model expects
            screen = screen.reshape([-1, FINAL_HEIGHT, FINAL_WIDTH, 1])
            predictions = model.predict([screen])
            #print(predictions)
        # SHOWING LINES (UNNECESSARY)
        clear_screen = grab_screen(process=False)
        if not paused:
            # overlay detected lines on the raw frame; best-effort only
            try:
                for line in lines:
                    cords = line[0]
                    if cords[1] > 200 or cords[3] > 200:
                        cv2.line(clear_screen, (cords[0], cords[1]), (cords[2], cords[3]), [0, 255, 0], 3)
            except:
                pass
        cv2.imshow('Window', clear_screen)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
        # SHOWING LINES (UNNECESSARY) END
        if not paused:
            predictions_to_direct(predictions)
            fps = 1 / (time.time() - last_time)
            print(f'{round(fps)} FPS')
        # 'Y' toggles the pause state; on pause, release every driving key
        keys = key_check()
        if 'Y' in keys:
            if paused:
                paused = False
                time.sleep(1)
            else:
                paused = True
                time.sleep(1)
                ReleaseKey(A)
                ReleaseKey(W)
                ReleaseKey(D)
if __name__ == '__main__':
    main()
| MendelDamian/Using-AI-to-follow-path-in-minecraft | main.py | main.py | py | 2,254 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "direct_keys.PressKey",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "direct_keys.W",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "direct_keys.ReleaseKey",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "direct_... |
856491497 | #!/usr/bin/env python
from pyhesity import *
import argparse
# command line arguments: connection/auth options followed by job options
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com')
parser.add_argument('-u', '--username', type=str, default='helios')
parser.add_argument('-d', '--domain', type=str, default='local')
parser.add_argument('-c', '--clustername', type=str, default=None)
parser.add_argument('-mcm', '--mcm', action='store_true')
parser.add_argument('-i', '--useApiKey', action='store_true')
parser.add_argument('-pwd', '--password', type=str, default=None)
parser.add_argument('-np', '--noprompt', action='store_true')
parser.add_argument('-m', '--mfacode', type=str, default=None)
parser.add_argument('-e', '--emailmfacode', action='store_true')
# objects to protect: repeatable -n flags and/or a one-per-line file
parser.add_argument('-n', '--objectname', action='append', type=str)
parser.add_argument('-l', '--objectlist', type=str, default=None)
parser.add_argument('-ex', '--exclude', action='store_true')
parser.add_argument('-j', '--jobname', type=str, required=True)
parser.add_argument('-sd', '--storagedomain', type=str, default='DefaultStorageDomain')
parser.add_argument('-p', '--policyname', type=str, default=None)
parser.add_argument('-tz', '--timezone', type=str, default='US/Eastern')
parser.add_argument('-st', '--starttime', type=str, default='21:00')
parser.add_argument('-z', '--pause', action='store_true')
parser.add_argument('-is', '--incrementalsla', type=int, default=60)  # incremental SLA minutes
parser.add_argument('-fs', '--fullsla', type=int, default=120)  # full SLA minutes
parser.add_argument('-q', '--qospolicy', type=str, choices=['kBackupHDD', 'kBackupSSD', 'kBackupAll'], default='kBackupHDD')
parser.add_argument('-s', '--sourcename', type=str, required=True)
parser.add_argument('-streams', '--streams', type=int, default=16)
args = parser.parse_args()
# unpack parsed arguments into module-level names used below
vip = args.vip
username = args.username
domain = args.domain
clustername = args.clustername
mcm = args.mcm
useApiKey = args.useApiKey
password = args.password
noprompt = args.noprompt
mfacode = args.mfacode
emailmfacode = args.emailmfacode
objectname = args.objectname
objectlist = args.objectlist
exclude = args.exclude
sourcename = args.sourcename
streams = args.streams
jobname = args.jobname
policyname = args.policyname
starttime = args.starttime
timezone = args.timezone
incrementalsla = args.incrementalsla
fullsla = args.fullsla
storagedomain = args.storagedomain
pause = args.pause
qospolicy = args.qospolicy
# prompt=None lets apiauth decide whether to ask for a password interactively;
# prompt=False forbids prompting entirely
if noprompt is True:
    prompt = False
else:
    prompt = None
# whether the new/updated protection group starts in the paused state
if pause:
    isPaused = True
else:
    isPaused = False
# gather list function
def gatherList(param=None, filename=None, name='items', required=True):
    """Collect items from a repeatable CLI parameter and/or a text file.

    :param param: list of items passed directly on the command line (or None)
    :param filename: optional path to a file with one item per line
    :param name: label used in the error message when nothing was provided
    :param required: when True, exit the process if the combined list is empty
    :return: combined list of items (file entries stripped, blanks dropped)
    """
    items = []
    if param is not None:
        items.extend(param)
    if filename is not None:
        # context manager guarantees the handle is closed even if reading fails
        # (the original left the file open on exception)
        with open(filename, 'r') as f:
            items += [s.strip() for s in f.readlines() if s.strip() != '']
    if required is True and len(items) == 0:
        print('no %s specified' % name)
        exit()
    return items
# get list of views to protect
objects = gatherList(objectname, objectlist, name='objects', required=False)
# authenticate (Helios/MCM, email-MFA, or direct cluster with optional MFA code)
if mcm:
    apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey, helios=True, prompt=prompt)
else:
    if emailmfacode:
        apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey, emailMfaCode=True, prompt=prompt)
    else:
        apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey, mfaCode=mfacode, prompt=prompt)
# if connected to helios or mcm, select to access cluster
if mcm or vip.lower() == 'helios.cohesity.com':
    if clustername is not None:
        heliosCluster(clustername)
    else:
        print('-clustername is required when connecting to Helios or MCM')
        exit()
if apiconnected() is False:
    print('authentication failed')
    exit(1)
# get protection source registered for MongoDB that matches -s/--sourcename
registeredSource = [r for r in (api('get', 'protectionSources/registrationInfo?environments=kMongoDB'))['rootNodes'] if r['rootNode']['name'].lower() == sourcename.lower()]
if registeredSource is None or len(registeredSource) == 0:
    print('%s is not a registered MongoDB source' % sourcename)
    exit(1)
source = api('get', 'protectionSources?id=%s' % registeredSource[0]['rootNode']['id'])
source = source[0]
# map "database" and "database.collection" names to their source IDs
objectIds = {}
if 'nodes' not in source or source['nodes'] is None or len(source['nodes']) == 0:
    print('no databases on %s' % sourcename)
    exit(0)
for database in source['nodes']:
    objectIds[database['protectionSource']['name']] = database['protectionSource']['id']
    for collection in database['nodes']:
        objectIds['%s.%s' % (database['protectionSource']['name'], collection['protectionSource']['name'])] = collection['protectionSource']['id']
# get job info: reuse an existing active group with this name, else build one
newJob = False
protectionGroups = api('get', 'data-protect/protection-groups?isDeleted=false&isActive=true', v=2)
jobs = protectionGroups['protectionGroups']
job = [job for job in jobs if job['name'].lower() == jobname.lower()]
if not job or len(job) < 1:
    newJob = True
    # find protectionPolicy (mandatory when creating a brand-new job)
    if policyname is None:
        print('Policy name required for new job')
        exit(1)
    policy = [p for p in api('get', 'protectionPolicies') if p['name'].lower() == policyname.lower()]
    if len(policy) < 1:
        print("Policy '%s' not found!" % policyname)
        exit(1)
    policyid = policy[0]['id']
    # find storage domain
    sd = [sd for sd in api('get', 'viewBoxes') if sd['name'].lower() == storagedomain.lower()]
    if len(sd) < 1:
        print("Storage domain %s not found!" % storagedomain)
        exit(1)
    sdid = sd[0]['id']
    # parse starttime ("HH:MM", 24h)
    try:
        (hour, minute) = starttime.split(':')
        hour = int(hour)
        minute = int(minute)
        if hour < 0 or hour > 23 or minute < 0 or minute > 59:
            print('starttime is invalid!')
            exit(1)
    except Exception:
        print('starttime is invalid!')
        exit(1)
    # skeleton v2 protection-group body; objects are filled in below
    job = {
        "policyId": policyid,
        "startTime": {
            "hour": int(hour),
            "minute": int(minute),
            "timeZone": timezone
        },
        "priority": "kMedium",
        "sla": [
            {
                "backupRunType": "kFull",
                "slaMinutes": fullsla
            },
            {
                "backupRunType": "kIncremental",
                "slaMinutes": incrementalsla
            }
        ],
        "qosPolicy": qospolicy,
        "storageDomainId": sdid,
        "name": jobname,
        "environment": "kMongoDB",
        "isPaused": isPaused,
        "description": "",
        "alertPolicy": {
            "backupRunStatus": [
                "kFailure"
            ],
            "alertTargets": []
        },
        "mongodbParams": {
            "objects": [],
            "concurrency": streams,
            "excludeObjectIds": [],
            "bandwidthMBPS": None,
            "sourceName": registeredSource[0]['rootNode']['name'],
            "sourceId": registeredSource[0]['rootNode']['id']
        }
    }
else:
    job = job[0]
if newJob is True:
    print('Creating protection job %s' % jobname)
else:
    print('Updating protection job %s' % job['name'])
# apply the object selections to the job and push it to the cluster
if len(objects) == 0 or exclude:
    # no explicit object list (or running in exclusion mode):
    # protect the entire registered source
    print('protecting %s' % sourcename)
    job['mongodbParams']['objects'] = [
        {
            "id": registeredSource[0]['rootNode']['id']
        }
    ]
for oName in objects:
    if oName in objectIds:
        if exclude:
            job['mongodbParams']['excludeObjectIds'].append(objectIds[oName])
            print('excluding %s' % oName)
        else:
            # skip objects the job already protects
            existingObject = [o for o in job['mongodbParams']['objects'] if o['id'] == objectIds[oName]]
            if existingObject is None or len(existingObject) == 0:
                job['mongodbParams']['objects'].append({"id": objectIds[oName]})
                print('protecting %s' % oName)
            else:
                print('%s already protected' % oName)
    else:
        print('%s not found' % oName)
if len(job['mongodbParams']['objects']) == 0:
    # fix: message previously read 'noting to protect'
    print('nothing to protect')
    exit(0)
# POST creates a new protection group, PUT updates the existing one
if newJob is True:
    result = api('post', 'data-protect/protection-groups', job, v=2)
else:
    result = api('put', 'data-protect/protection-groups/%s' % job['id'], job, v=2)
| bseltz-cohesity/scripts | python/protectMongoDB/protectMongoDB.py | protectMongoDB.py | py | 8,412 | python | en | code | 85 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
}
] |
41978343892 | from ja.common.job import JobStatus
from ja.server.config import ServerConfig
from ja.server.database.sql.database import SQLDatabase
from ja.server.database.types.work_machine import WorkMachineState
from ja.server.dispatcher.dispatcher import Dispatcher
from ja.server.dispatcher.proxy_factory import WorkerProxyFactory, WorkerProxyFactoryBase
from ja.server.email_notifier import EmailNotifier, BasicEmailServer
from ja.server.scheduler.algorithm import SchedulingAlgorithm
from ja.server.scheduler.default_algorithm import DefaultSchedulingAlgorithm
from ja.server.scheduler.scheduler import Scheduler
from ja.server.proxy.command_handler import ServerCommandHandler
from ja.server.web.api_server import StatisticsWebServer
import ja.server.scheduler.default_policies as dp
import logging
logger = logging.getLogger(__name__)
class JobCenter:
    """
    Main class for the central server daemon. Contains singleton objects of the
    following classes: ServerCommandHandler, Database, Scheduler, Dispatcher,
    Notifier, HttpServer.
    """
    @staticmethod
    def _init_algorithm() -> SchedulingAlgorithm:
        """Build the default scheduling algorithm with its three policies."""
        cost_function = dp.DefaultCostFunction()
        return DefaultSchedulingAlgorithm(cost_function,
                                          dp.DefaultNonPreemptiveDistributionPolicy(cost_function),
                                          dp.DefaultBlockingDistributionPolicy(),
                                          dp.DefaultPreemptiveDistributionPolicy(cost_function))
    @staticmethod
    def _read_config(config_file: str) -> ServerConfig:
        """Parse the server configuration from the given file path."""
        with open(config_file) as f:
            logger.info("reading %s config file" % config_file)
            return ServerConfig.from_string(f.read())
    def _cleanup(self) -> None:
        """Reset persistent state after a (re)start or shutdown.

        Marks jobs that were running/paused as crashed, unassigns them,
        frees all work machine resources and marks the machines offline.
        """
        for job in self._database.get_current_schedule():
            if job.job.status in [JobStatus.RUNNING, JobStatus.PAUSED]:
                job.job.status = JobStatus.CRASHED
                self._database.update_job(job.job)
                self._database.assign_job_machine(job.job, None)
        for machine in self._database.get_work_machines():
            # deallocate everything currently in use (total - free)
            machine.resources.deallocate(machine.resources.total_resources - machine.resources.free_resources)
            machine.state = WorkMachineState.OFFLINE
            self._database.update_work_machine(machine)
    def __init__(self, config_file: str = "/etc/jobadder/server.conf",
                 socket_path: str = "/tmp/jobadder-server.socket", database_name: str = "jobadder") -> None:
        """!
        Initialize the JobAdder server daemon.
        This includes:
        1. Parsing the command line arguments and the configuration file.
        2. Connecting to the configured database.
        3. Initializing the scheduler and the dispatcher.
        4. Starting the web server and the email notifier.
        @param config_file: the configuration file to use.
        @param socket_path: the path to the unix named socket for the command handler to listen on.
        @param database_name: the name of the database to use.
        """
        config = self._read_config(config_file)
        self._database = SQLDatabase(host=config.database_config.host,
                                     port=config.database_config.port,
                                     user=config.database_config.username,
                                     password=config.database_config.password,
                                     database_name=database_name,
                                     max_special_resources=config.special_resources)
        # clear any stale state left over from a previous run
        self._cleanup()
        proxy_factory = self._get_proxy_factory()
        self._dispatcher = Dispatcher(proxy_factory)
        self._scheduler = Scheduler(self._init_algorithm(), self._dispatcher, config.special_resources)
        self._email = EmailNotifier(BasicEmailServer(config.email_config.host,
                                                     config.email_config.port,
                                                     config.email_config.username,
                                                     config.email_config.password))
        # a non-positive port disables the statistics web server
        if config.web_server_port > 0:
            self._web_server = StatisticsWebServer("", config.web_server_port, self._database)
        else:
            self._web_server = None
        self._database.set_scheduler_callback(self._scheduler.reschedule)
        self._database.set_job_status_callback(self._email.handle_job_status_updated)
        self._handler = ServerCommandHandler(self._database, socket_path, config.admin_group)
    def _get_proxy_factory(self) -> WorkerProxyFactoryBase:
        """Return the factory used to create proxies for work machines."""
        return WorkerProxyFactory(self._database)
    def run(self) -> None:
        """!
        Run the main loop of the server daemon.
        """
        logger.info("starting main loop")
        self._handler.main_loop()
        # Cleanup, but don't invoke scheduler anymore.
        self._database.set_scheduler_callback(None)
        self._cleanup()
        if self._web_server:
            self._web_server.stop()
| DistributedTaskScheduling/JobAdder | src/ja/server/main.py | main.py | py | 5,061 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "ja.server.scheduler.default_policies.DefaultCostFunction",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "ja.server.scheduler.default_policies",
"line_number": 29,
"usa... |
30071276942 | from datetime import datetime
from elasticsearch import Elasticsearch
DEBUG = True
def connect_elasticsearch():
    """Create an Elasticsearch client for localhost:9200.

    Returns the client when the cluster answers a ping, otherwise False.
    Fix: the original only verified the connection when DEBUG was on, so a
    dead client object could be returned in non-debug runs; the dead
    ``_es = None`` assignment is also removed.
    """
    _es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
    if not _es.ping():
        if DEBUG:
            print('[-] Couldn\'t connect to elasticsearch')
        return False
    if DEBUG:
        print('[+] Connected to elasticsearch successfully')
    return _es
def send_to_elasticsearch(index_name, dictionary, doc_type):
    """Index every value of *dictionary* as a document in *index_name*.

    :param index_name: target Elasticsearch index
    :param dictionary: mapping whose values are the document bodies (keys are
        not part of the stored documents)
    :param doc_type: document type passed to ``es.index``
    """
    es = connect_elasticsearch()
    # identity check against the False sentinel (was a non-idiomatic `!=`)
    if es is not False:
        # only the values are indexed, so iterate .values() instead of .items()
        for value in dictionary.values():
            res = es.index(index=index_name, doc_type=doc_type, body=value)
            # print(res['result'])
    else:
        print("[-] There was a problem connecting to the Elastic Search... skipping")
| CUTLER-H2020/DataCrawlers | Economic/maps.me/elasticsearch_functions.py | elasticsearch_functions.py | py | 773 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 8,
"usage_type": "call"
}
] |
39689533340 | from atlas import Operation, Entity
from physics import Point3D
from rules import Location
from mind.goals.dynamic.DynamicGoal import DynamicGoal
class School(DynamicGoal):
    """Move in a shoal with other animals of the same type"""
    def __init__(self, members=[], desc="mirror the movement of other animals like me"):
        # NOTE(review): `members` is accepted but never used here — confirm intent
        DynamicGoal.__init__(self,
                             trigger="sight_move",  # replace with something more appropriate
                             desc=desc)
    def event(self, me, original_op, op):
        """React to a sighted move: mirror nearby same-type entities.

        NOTE(review): the `type(ent.location.pos) != object` guard below is
        True for any concrete position type, so this method appears to always
        return early — confirm whether that comparison is intentional.
        """
        # print "school called"
        ent = op[0]
        if ent.id == me.entity.id:
            # print "detecting myself"
            return
        ent = me.map.get(ent.id)
        if ent is None:
            # print "type is none"
            return
        if ent.name != me.name:
            # print "ent.type!=me.entity.type"
            return
        if not ent.parent:
            # print "school.event, ent.parent is None"
            return
        if not me.entity.parent:
            # print "school.event, me.entity.parent is None"
            return
        if me.entity.parent.id != ent.parent.id:
            # print "me.entity.parent.id!=ent.parent.id"
            return
        if type(ent.location.pos) != object:
            # print ent.location.pos
            # print type(ent.location.pos)
            # print "type(ent.location.pos)!=object"
            return
        distance = (ent.location.pos - me.entity.location.pos).mag()
        # if we are focused on something in another place, drop the focus
        focus_id = me.get_knowledge('focus', 'hook')
        if focus_id:
            thing = me.map.get(focus_id)
            if thing is None:
                me.remove_knowledge('focus', self.what)
            else:
                if thing.parent.id != me.entity.parent.id:
                    me.remove_knowledge('focus', self.what)
                else:
                    if thing.parent.id == me.entity.id:
                        return
        # ensures that the entity will check only other entities really close to it,
        # thereby reducing the possibility of infinite loops
        if distance < 0.4 and ent.location.velocity:
            print("changing only velocity")
            new_loc = Location(me.entity.parent)
            new_loc.velocity = ent.location.velocity
        if distance > 0.4 and ent.location.velocity:
            print("changing both location and velocity")
            myvel = me.entity.location.velocity.unit_vector()
            evel = ent.location.velocity.unit_vector()
            edir = (ent.location.pos - me.entity.location.pos).unit_vector()
            # already moving with (or towards) the other: nothing to change
            if myvel and (evel.dot(myvel) > 0.9 or edir.dot(myvel) > 0.9):
                return
            if edir.dot(evel) < 0:
                new_loc = Location(me.entity.parent)
                # replace by rotatez?
                new_loc.velocity = -ent.location.velocity
            else:
                new_loc = Location(me.entity.parent)
                new_loc.velocity = ent.location.velocity
        else:
            print("everything perfect, not doing anything")
            new_loc = ent.location.copy()
            edir = (ent.location.pos - me.entity.location.pos).unit_vector()
            new_loc.pos = new_loc.pos - edir
        return Operation("move", Entity(me.entity.id, location=new_loc))
class flock(DynamicGoal):
    """Move in a flock with other animals of the same type."""
    def __init__(self, members=[], desc="move in flocks with other animals like me"):
        # NOTE(review): `members` is accepted but never used here — confirm intent
        DynamicGoal.__init__(self,
                             trigger="sight_move",
                             desc=desc)
    def event(self, me, original_op, op):
        """Converge towards sighted same-type entities (currently disabled)."""
        ent = op[0]
        # This goal currently causes mayhem. Effectively disable it.
        if 1:
            return
        # --- everything below is dead code until the disable above is removed ---
        if ent.id == me.entity.id:
            return
        ent = me.map.get(ent.id)
        if ent == None:
            print("not convering on Nothing")
            return
        if ent.type[0] != me.entity.type[0]:
            print("not convering on something not me")
            return
        if type(ent.parent) == type(None):
            print("flock.event, ent.parent is None")
            return
        if type(me.entity.parent) == type(None):
            print("flock.event, me.entity.parent is None")
            return
        if me.entity.parent.id != ent.parent.id:
            print("not convering on something elsewhere")
            return
        if type(ent.location.pos) != Point3D:
            print("position not an Point", type(ent.location.pos))
            return
        edist = (ent.location.pos - me.entity.location.pos)
        # already close enough: no convergence needed
        if edist.sqr_mag() < 50:
            print("not convering on close enough")
            return
        evel = ent.location.velocity
        if evel and evel.sqr_mag() > 0.1:
            myvel = me.entity.location.velocity
            edir = edist.unit_vector()
            if myvel and myvel.sqr_mag() > 0.1:
                myvel = myvel.unit_vector()
                # If I move in the same direction, then do nothing
                if evel.dot(myvel) > 0.5:
                    print("not convering on moving with")
                    return
                # If I am moving towards them, then do nothing
                if edir.dot(myvel) > 0.5:
                    print("not convering on moving towards them")
                    return
            # If they are coming towards me, then do nothing
            if edir.dot(evel) < - 0.5:
                print("not convering on moving towards me")
                return
            new_loc = Location(me.entity.parent)
            new_loc.velocity = ent.location.velocity
        else:
            # target is standing still: step towards it by one unit
            new_loc = ent.location.copy()
            edir = (ent.location.pos - me.entity.location.pos).unit_vector()
            new_loc.pos = new_loc.pos - edir
        print("converging")
        return Operation("move", Entity(me.entity.id, location=new_loc))
class herd(DynamicGoal):
    """Move in a herd with other animals of the same type."""
    def __init__(self, members=[], desc="move in herds with other animals like me"):
        DynamicGoal.__init__(self,
                             trigger="sight_move",
                             desc=desc)
        # Fix: the membership dict must exist before the seeding loop writes
        # into it. Previously the dict was created *after* the loop, which
        # raised AttributeError for non-empty `members` and would have
        # discarded the seeded entries otherwise.
        self.herd_members = {}
        for m in members:
            # 51 sightings marks an entity as an established herd member
            self.herd_members[m] = 51
    def event(self, me, original_op, op):
        """Track sightings of same-type entities and follow established members."""
        ent = op[0]
        if ent.id == me.entity.id:
            return
        ent = me.map.get(ent.id)
        if ent is None:
            return
        if ent.type[0] != me.entity.type[0]:
            return
        if me.entity.parent.id != ent.parent.id:
            return
        try:
            val = self.herd_members[ent.id]
        except KeyError:
            val = 0
        if type(ent.location.pos) != Point3D:
            return
        # close sightings only build familiarity, they never trigger movement
        if me.entity.location.pos.distance(ent.location.pos) < 6:
            val = val + 1
            self.herd_members[ent.id] = val
            return
        # If we have not seen this one before 50 times, then it is not yet
        # really a member of the herd
        if not val > 50:
            return
        val = val + 1
        self.herd_members[ent.id] = val
        if ent.location.velocity:
            myvel = me.entity.location.velocity.unit_vector()
            evel = ent.location.velocity.unit_vector()
            edir = (ent.location.pos - me.entity.location.pos).unit_vector()
            # If I am moving towards them, or in the same direction, then do nothing
            if myvel and (evel.dot(myvel) > 0.5 or edir.dot(myvel) > 0.5):
                return
            # If they are coming towards me, then do nothing
            if edir.dot(evel) < - 0.5:
                return
            new_loc = Location(me.entity.parent)
            new_loc.velocity = ent.location.velocity
        else:
            # member is standing still: step towards it by one unit
            new_loc = ent.location.copy()
            edir = (ent.location.pos - me.entity.location.pos).unit_vector()
            new_loc.pos = new_loc.pos - edir
        return Operation("move", Entity(me.entity.id, location=new_loc))
| worldforge/cyphesis | data/rulesets/basic/scripts/mind/goals/animal/herd.py | herd.py | py | 8,106 | python | en | code | 95 | github-code | 36 | [
{
"api_name": "mind.goals.dynamic.DynamicGoal.DynamicGoal",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "mind.goals.dynamic.DynamicGoal.DynamicGoal.__init__",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "mind.goals.dynamic.DynamicGoal.DynamicGoal",
"l... |
26543825982 | from setuptools import setup
# use the README as the package's long description
with open('README.rst') as f:
    long_description = f.read()
setup(
    name = 'PorownywarkaOfert',
    version = '0.1',
    author = 'saraCzelusniak',
    author_email = 'saraCzelusniak@bitbucket.org',
    url = '',
    description = 'Porownywanie ofert',
    long_description = long_description,
    # package sources live under src/
    package_dir = {'' : 'src'},
    #setuptools.find_packages()
    packages = ['porownywarkaOfert'],
    classifiers = [
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2.7.3',
        'Natural Language :: Polish',
    ],
    include_package_data = True,
)
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
}
] |
6395153010 | from typing import List
def add_value_to_rating(new_value: int, rating: List[int]) -> None:
    """Insert *new_value* into the descending-sorted *rating* list in place.

    The value is placed before the first element strictly smaller than it,
    so equal values end up after the existing ones (stable insert). Appends
    at the end when no smaller element exists (including an empty list).
    """
    # idiomatic replacement for the original range(len(...)) scan with break
    insert_at = next(
        (i for i, value in enumerate(rating) if value < new_value),
        len(rating),
    )
    rating.insert(insert_at, new_value)
rating = [7, 5, 3, 3, 2]
# print the current rating, then keep prompting until a positive integer is given
print('Текущий рейтинг:', rating)
new_value = None
while new_value is None or new_value <= 0:
    s = input('Введите новое значение:')
    if s.isdigit():
        new_value = int(s)
add_value_to_rating(new_value, rating)
print()
print('Новый рейтинг:', rating)
| vlp4/study-python | lesson2/task5.py | task5.py | py | 601 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 4,
"usage_type": "name"
}
] |
74967962022 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# (section:results_wagner)=
# # Identification of a dynamical system for the Wagner function
#
# ## Data generation
#
# As a first step, data for the analytical form of the Wagner function is generated. As seen in {ref}`section:wagner`, this can be done by integrating the Theodorsen function in two different ways, depending on the time step we want to compute:
# ```{math}
# \begin{eqnarray}
# \phi(t) &=& \frac{1}{2} + \frac{2}{\pi}\int_0^\infty \frac{1}{k}\left(G_1(k) - \frac{1}{2}\right)\sin(kt)dk \\
# &=& 1 + \frac{2}{\pi}\int_0^\infty \frac{1}{k}G_2(k)\cos(kt)dk\label{eqn:int_large_t}
# \end{eqnarray}
# ```
#
# Where $G_1$ and $G_2$ are the real and imaginary part of the Thodorsen function.
# In[2]:
import numpy as np
from scipy.special import jv, yn # Bessel functions
from scipy.integrate import quad
def Wagner(t, large_times = False):
    """Evaluate the Wagner indicial lift function phi(t) by numerically
    integrating the Theodorsen function.

    Two equivalent integral forms are available: the sine form built from the
    real part G1(k) (default, suited to small times) and the cosine form built
    from the imaginary part G2(k) (``large_times=True``, better conditioned
    for large times).

    :param t: non-dimensional time at which to evaluate phi
    :param large_times: select the cosine (large-time) formulation
    :return: phi(t) as a float
    """
    if large_times == False:
        # G1(k): real part of the Theodorsen function
        G = lambda k: (jv(1,k)*(jv(1,k)+yn(0,k))+yn(1,k)*(yn(1,k)-jv(0,k)))/((jv(1,k) + yn(0,k))**2 + (yn(1,k) - jv(0,k))**2)
        phi = 1/2 + 2/np.pi*quad(lambda k: 1/k*(G(k)-1/2)*np.sin(k*t), 0, 100, limit = int(100*t)+50)[0]
    else:
        # G2(k): imaginary part of the Theodorsen function
        G = lambda k: (yn(1,k)*yn(0,k)+jv(1,k)*jv(0,k))/((jv(1,k) + yn(0,k))**2 + (yn(1,k) - jv(0,k))**2)
        # fix: quad requires limit >= 1, but int(100*t) is 0 for t < 0.01;
        # clamp to at least 50 subintervals (matches the other branch's floor)
        phi = 1 - 2/np.pi*quad(lambda k: 1/k*G(k)*np.cos(k*t), 0, 100, limit = max(int(100*t), 50))[0]
    return phi
# Since the data genearation is very time consuming, it has been carried out once and the results exported to a `.dat` file. Since sometimes the numerical integration procedure produces cosiderable errors a small function has been made in order to clean, even if only roughly, the imported data before proceeding with the SINDy identification.
#
# The function is based on the assumption that for large times the Wagner functions, or better $1-\phi(t)$, behaves like $t^{-2}$. This allows a normalisation that increases the magnitude of the errors and makes them very easy to identify and correct. The correction is made by doing a mean with the nearest uncorrupted data points.
# In[3]:
import matplotlib.pyplot as plt
def clean_data(data, tol = 2):
    '''
    Clean numerical-integration errors from Wagner-function data, based on the
    hypothesis that errors appear in the late transient (t >> 1) where the
    derivative is almost zero and 1 - L(t) decays like t^-2.

    INPUT:
    data: n x 2 np.array;
        The data to be cleaned, first column being time and the second L(t).
        Assumes a fixed time step. Modified in place and also returned.
    tol: float;
        Tolerance on the value of the t^2-normalised derivative; points
        above it (after the initial rise) are treated as outliers.
    OUTPUT:
    data: n x 2 np.array;
        Cleaned data. Also produces two diagnostic plots (blocking plt.show).
    '''
    fig_data, ax_data = plt.subplots()
    ax_data.plot(data[:,0],1 - data[:,1])
    dt = data[1,0] - data[0,0] # fixed timestep
    derivative = (data[1:,1] - data[0:-1,1]) / dt # backward difference
    # normalising by t^2 makes late-time errors stand out clearly
    der_normalised = derivative * data[0:-1,0]**2
    # Find end of first rising time
    rising_tol = 5e-5
    end_rise = 0
    while np.abs(derivative[end_rise]) > rising_tol: # whith normalised data the value ramps up to one
        end_rise += 1
    # outliers: large normalised derivative, past the initial rise
    outlier_index = np.where(np.abs(der_normalised) > tol)[0]
    outlier_index = outlier_index[np.where(outlier_index > end_rise)]
    # find nearest non-corrupted data
    # Since numerical errors can come in adiacent points, it is important to find the
    # nearest uncorrupted data
    if len(outlier_index) != 0:
        diff = outlier_index[1:] - outlier_index[0:-1]
        # print(diff)
        def count_ones(diff):
            # length of each run of consecutive outlier indices
            count = 1
            counts = []
            for i in range(len(diff)):
                if diff[i] == 1:
                    count += 1
                else:
                    counts.append(count)
                    count = 1
            counts.append(count)
            return counts
        count = count_ones(diff)
        # for each outlier run, the indices of the clean points just before/after
        non_corrupted_index = np.zeros((len(count),2), dtype=np.int64)
        non_corrupted_index[0,0] = outlier_index[0]-1
        j = 0
        for i in range(len(diff)):
            if diff[i] != 1:
                non_corrupted_index[j,1] = outlier_index[i]+1
                j += 1
                non_corrupted_index[j,0] = outlier_index[i+1]-1
        non_corrupted_index[j,1] = outlier_index[-1]+1
        # compute the mean with nearest clean data
        mean_index = []
        for i in range(len(count)):
            for j in range(count[i]):
                mean_index.append(non_corrupted_index[i,:])
        for i in range(len(outlier_index)):
            outlier = outlier_index[i]
            indices = mean_index[i]
            lower_index = indices[0]
            upper_index = indices[1]
            data[outlier, 1] = (data[lower_index, 1] + data[upper_index, 1]) / 2
    # The plotting helps with evaluating the performance of the function
    fig, ax = plt.subplots()
    ax.plot(data[0:-1, 0], np.abs(derivative))
    ax.plot(data[0:-1, 0], np.abs(der_normalised))
    ax.axhline(y = tol,color = 'k', linestyle = '--')
    ax.axvline(data[end_rise,0], color = 'k', linestyle = '-.')
    ax.plot(data[outlier_index,0],np.abs(der_normalised[outlier_index]), color = 'r', marker = 'o', linestyle = 'None')
    ax.set_title('Derivatives')
    ax.set_xlabel('Time [s]')
    ax.set_ylabel('L\'(t)')
    ax.set_yscale('log')
    ax.legend(['L\'(t)', 'Normalized L\'(t)', 'Tolerance', 'Start of treated zone'])
    ax.grid(True)
    ax_data.plot(data[:,0],1 - data[:,1])
    ax_data.plot(data[outlier_index,0], 1 - data[outlier_index,1], color = 'r', marker = 'o', linestyle = 'None')
    ax_data.set_xlabel('Time')
    ax_data.set_ylabel('1-L(t)')
    ax_data.set_title('Cleaned data')
    ax_data.set_yscale('log')
    ax_data.grid(True)
    ax_data.legend(['Original data', 'Cleaned data', 'Corrupted data'])
    plt.show()
    return data
# load the precomputed Wagner data (columns: time, L(t)) and clean it
wagner_data = np.loadtxt('wagner_data.dat', dtype=float)
cleaned_wagner_data = clean_data(wagner_data)
t = cleaned_wagner_data[:,0]
x = 1 - cleaned_wagner_data[:,1] # normalized, steady-state-subtracted lift (see reference [1])
# SINDy expects a (n_samples, n_features) array
x = np.array([x]).T
# The fact that generating a clean dataset is so time consuming is a statement to the need of a precise but easily computed model for the Wagner function. As stated in {cite}`dawson2022improved`, this can be done by fitting a simple nonlinear ODE in the polynomial form:
# ```{math}
# \dot L(t) = \sum_{i=0}^r\xi_iL^i(t)
# ```
#
# where $\xi_r$ is the coefficient of the term of order $r$. Once this type of model is correctly identified it can be simulated forward in time as much as needed, or used as part of a closed or open loop controller. In order to do so we have tried to replicate the results proposed in {cite}`dawson2022improved` for $r = 1,\dots,8$
# In[4]:
import pysindy as ps
# fit polynomial SINDy models of increasing degree and compare to the data
fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
fig3, ax3 = plt.subplots()
for deg in range(0,9):
    # fitting model
    optimizer = ps.optimizers.stlsq.STLSQ(threshold = 0.1, alpha = 1e-06, max_iter = 10)
    library = ps.feature_library.polynomial_library.PolynomialLibrary(degree = deg)
    model = ps.SINDy(optimizer = optimizer,
                     feature_library = library,
                     feature_names = ['phi']) # default paramaters:
    # differentiation method: centered differences
    model.fit(x, t = t[1] - t[0]) # fitting the model
    x0 = np.array([0.5])
    model_x = model.simulate(x0, t) # evaluating forward in time to compare
    err_norm = np.abs(x - model_x) / x # noramlised error
    err = np.abs(x - model_x) # error
    # plotting (degrees 0 and 1 yield all-zero models, so skip them)
    if deg > 1:
        model.print()
        ax1.plot(t, model_x)
        ax1.set_xlabel('t [s]')
        ax1.set_ylabel(r'L(t) - $L_0$ / $L_0$')
        ax1.set_title('Fitted Unsteady Lift')
        ax1.set_xscale('log')
        ax1.set_yscale('log')
        ax1.grid(True)
        ax2.plot(t, err)
        ax2.set_xlabel('t [s]')
        ax2.set_ylabel(r'$L(t) - \hat{L}(t)$')
        ax2.set_yscale('log')
        ax2.set_title('Error')
        ax2.grid(True)
        ax3.plot(t, err_norm)
        ax3.set_xlabel('t [s]')
        ax3.set_ylabel(r'$\frac{L(t) - \hat{L}(t)}{1 - L(t)}$')
        ax3.set_yscale('log')
        ax3.set_title('Normalised Error')
        ax3.grid(True)
ax1.plot(t, x, 'k--')
ax1.legend(['r = 2','r = 3','r = 4','r = 5','r = 6','r = 7','r = 8', 'Analytical'])
ax2.legend(['r = 2','r = 3','r = 4','r = 5','r = 6','r = 7','r = 8'], loc = 'upper right')
ax3.legend(['r = 2','r = 3','r = 4','r = 5','r = 6','r = 7','r = 8'], loc = 'lower right')
plt.show()
# As in {cite}`dawson2022improved` $\xi_0$ and $\xi_1$ are always zero. The results for $r=7$ and $r=8$ are quite far from of the others, and in fact perform worse than the $r=6$ case. Possible explanations for this include:
# * The residual errors not corrected by the correction algorithm
# * An attempt of the algorithm to minimise the error at the beginning, with repercussions of the accuracy for greater times
# * A not long enough time window
#
# The results, even though somewhat different from those presented in {cite}`dawson2022improved`, are nonetheless satisfying. In particular for $r=6$ the relative error goes down as much as $10^{-3}$, which is remarkable.
#
# The results show that the SINDy algorithm, even if quite robust, necessitates of very clean and complete data to work properly, especially in terms of repeatability of the results.
#
# It can be tried to fit the data using orthogonal polynomials. To do so it is sufficient to generate the data library using the dedicated script and pass it to the algorithm.
# In[5]:
from PolynomialChaos import *
aPC_Wagner = PolynomialChaos(
x,
expansionDegree = 8,
numberOfInputs = 1)
aPC_Wagner.ComputeCoefficients(threshold = 1e-2, normalize = False)
coefficients = aPC_Wagner.coefficients
AlphaMatrix = aPC_Wagner.AlphaMatrix
LibraryList = GenerateLibraryList(
expansionDegree=8,
coefficients = coefficients,
AlphaMatrix = AlphaMatrix)
fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
fig3, ax3 = plt.subplots()
# Fit one SINDy model per truncation level `deg` of the orthogonal-polynomial
# library and overlay each fit on the shared figures
# (ax1: fitted lift, ax2: absolute error, ax3: normalised error).
for deg in range(0,9):
    # fitting model
    # threshold = 0.0 disables sparsity thresholding (see discussion below).
    optimizer = ps.optimizers.stlsq.STLSQ(threshold = 0.0, alpha = 1e-06, max_iter = 10)
    # Use only the first deg+1 orthogonal basis functions from the library.
    library = ps.feature_library.custom_library.CustomLibrary(LibraryList[0:deg+1])
    model = ps.SINDy(optimizer = optimizer,
                     feature_library = library,
                     feature_names = ['phi']) # default parameters:
    # differentiation method: centered differences
    model.fit(x, t = t[1] - t[0])
    x0 = np.array([0.5])
    # Integrate the identified model forward in time and compare against x.
    model_x = model.simulate(x0, t)
    err_norm = np.abs(x - model_x) / x  # normalised error
    err = np.abs(x - model_x)           # absolute error
    # plotting
    # Only degrees > 1 are plotted (lower-order fits are omitted).
    if deg > 1:
        model.print()
        ax1.plot(t, model_x)
        ax1.set_xlabel('t [s]')
        ax1.set_ylabel(r'L(t) - $L_0$ / $L_0$')
        ax1.set_title('Fitted Unsteady Lift')
        ax1.set_xscale('log')
        ax1.set_yscale('log')
        ax1.grid(True)
        ax2.plot(t, err)
        ax2.set_xlabel('t [s]')
        ax2.set_ylabel(r'$L(t) - \hat{L}(t)$')
        ax2.set_yscale('log')
        ax2.set_title('Error')
        ax2.grid(True)
        ax3.plot(t, err_norm)
        ax3.set_xlabel('t [s]')
        ax3.set_ylabel(r'$\frac{L(t) - \hat{L}(t)}{1 - L(t)}$')
        ax3.set_yscale('log')
        ax3.set_title('Normalised Error')
        ax3.grid(True)
# Overlay the analytical reference solution and add the legends.
ax1.plot(t, x, 'k--')
ax1.legend(['r = 2','r = 3','r = 4','r = 5','r = 6','r = 7','r = 8', 'Analytical'])
ax2.legend(['r = 2','r = 3','r = 4','r = 5','r = 6','r = 7','r = 8'], loc = 'lower right')
ax3.legend(['r = 2','r = 3','r = 4','r = 5','r = 6','r = 7','r = 8'], loc = 'lower right')
plt.show()
# It can be observed that the results are not particularly better. In particular, if any thresholding is applied when fitting the model the result quality decreases drastically, which prevents us from using the sparsity-promoting capability of SINDy to the fullest. This results in the appearance of zeroth and first order terms, even though these are cancelled out when higher order terms appear.
#
# However, as one could expect, new terms are added without drastically modifying the previous ones, which confirms the key property of the orthogonalisation procedure.
#
# Below are reported the orthogonal polynomials for this dataset.
# In[6]:
aPC_Wagner.printFeatureNames()
| enricofoglia/SIA_RP_Report | _build/jupyter_execute/08WagnerSINDy.py | 08WagnerSINDy.py | py | 12,852 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.special.jv",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "scipy.special.yn",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "scipy.integrate.quad"... |
39885505055 | import pandas as pd
import re
from csv import reader
import altair as alt
import streamlit as st
from pandas.api.types import is_numeric_dtype
from urllib import request
# Page settings
st.set_page_config(
layout='wide',
initial_sidebar_state="expanded"
)
m = st.markdown("""
<style>
div.stButton > button:first-child {
background-color: #ff4c4c;
color: white;
}
</style>""", unsafe_allow_html=True)
footer="""<style>
a:link , a:visited{
color: blue;
background-color: transparent;
text-decoration: underline;
}
a:hover, a:active {
color: red;
background-color: transparent;
text-decoration: underline;
}
.footer {
position: fixed;
left: 0;
bottom: 0;
width: 100%;
background-color: white;
color: black;
text-align: center;
}
</style>
<div class="footer">
<p>Developed by <a href="mailto: cghisolfi@gannett.com" target="_blank">Caroline Ghisolfi</a></p>
</div>
"""
# Defining dtypes
contribs=['Contribution', 'Contributor', 'contribs']
expend = ['Expenditure', 'Payee', 'expend']
loans = ['Loan', 'Lender', 'loans']
dtypes = [contribs, expend, loans]
def convert_df(df):
    """Serialize a DataFrame to CSV text and return it as UTF-8 bytes."""
    # IMPORTANT: cache the conversion to prevent recomputation on every rerun.
    csv_text = df.to_csv()
    return csv_text.encode('utf-8')
def display_download_button(data, label, file_name):
    """Render a Streamlit button that downloads *data* as a CSV file."""
    csv_bytes = convert_df(data)
    st.download_button(
        label=label,
        data=csv_bytes,
        file_name=file_name,
        mime='text/csv'
    )
@st.cache
def load_filers():
    """Load the filer master table from S3 and normalize its column names.

    Cached by Streamlit so the (network) read happens once per session.
    """
    filers = pd.read_csv('https://data-statesman.s3.amazonaws.com/tec-campaign-finance/processed/filers.csv', dtype={'filerIdent': str})
    # Split camelCase headers into spaced Title Case words, dropping any 'Cd' suffix.
    filers.columns = [re.sub( '(?<!^)(?=[A-Z])', ' ', col.replace('Cd', '')).title() for col in filers.columns]
    filers['Filer Filerpers Status'] = filers['Filer Filerpers Status'].str.replace('_', ' ')
    return filers
def filter_balance(ids):
    """Load and concatenate the balance CSVs for the given filer ids.

    Returns an empty DataFrame when no balance file exists for any id
    (missing data is not an error for the callers, which only test len()).
    """
    try:
        balance = pd.concat([pd.read_csv(f'https://data-statesman.s3.amazonaws.com/tec-campaign-finance/processed/balance/balance_{id}.csv', dtype={'filer_ident': str}, parse_dates=['received_dt']) for id in ids])
        balance.columns = [col.replace('_', ' ').title() for col in balance.columns]
        return balance
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; any HTTP/parse error yields an empty frame as before.
        return pd.DataFrame()
def filter_data(ids, filername, dtype):
    """Load one data type (contributions/expenditures/loans) for one filer.

    Parameters
    ----------
    ids : list
        Filer ids sharing the same filer name.
    filername : str
        Display name used to filter the loaded rows.
    dtype : list
        [var, prefix, var_short] descriptor, e.g. ['Expenditure', 'Payee', 'expend'].

    Returns
    -------
    pandas.DataFrame
        Filtered rows with a 'year' column added, or an empty DataFrame when
        nothing could be loaded (best effort; callers only test len()).
    """
    # Load dtype vars
    var, prefix, var_short = dtype[0], dtype[1], dtype[2]
    # The raw date column is 'expend_dt' for expenditures, '<var>_dt' otherwise.
    vardt = 'expend' if var == 'Expenditure' else var
    try:
        data = pd.concat([
            pd.read_csv(f'https://data-statesman.s3.amazonaws.com/tec-campaign-finance/processed/{var_short}/{var_short}_{id}.csv',
                        low_memory=False, parse_dates=[f'{vardt.lower()}_dt', 'received_dt']) for id in ids
        ])
        data.columns = data.columns.str.replace('_', ' ').str.title().str.replace('Expend', 'Expenditure')
        # .copy() so adding 'year' below does not write into a slice
        # (avoids SettingWithCopyWarning).
        filtered_data = data[data['Filer Name'].str.lower() == filername.lower()].copy()
        filtered_data['year'] = filtered_data[f'{var} Dt'].dt.year
        # Keep only id/name among the filer columns and drop office columns.
        filtered_data = filtered_data[[
            col for col in filtered_data.columns if ('Filer' not in col or col in ['Filer Ident', 'Filer Name']) and 'Office' not in col
        ]]
        return filtered_data
    except Exception:
        # Narrowed from a bare `except:`; an empty DataFrame (instead of the
        # original `[]`) keeps the return type consistent with the success path.
        return pd.DataFrame()
def display_filertable(filertable):
    """Show the filer-information table, warning when one name maps to several ids."""
    if filertable['Filer Ident'].nunique() > 1:
        filer_name = filertable['Filer Name'].iloc[0]
        st.warning(f'More than one filer ID found for {filer_name}. Please consult the filer information box below and verify that all filer records listed match your search.')
    with st.expander('Filer Information'):
        st.dataframe(filertable.fillna(''))
def display_balance_stats(filtered_balance):
    """Show the latest reported balance and its change versus the previous filing.

    Bug fix: the original indexed row 1 unconditionally, which raised
    IndexError when exactly one balance filing existed; a single filing now
    shows the latest balance without a delta.
    """
    if len(filtered_balance) == 0:
        return
    # Rows are assumed newest-first (iloc[0] is the latest filing) — as in
    # the original implementation.
    last_balance_amount = round(filtered_balance.iloc[0]['Balance'])
    last_balance_date = filtered_balance.iloc[0]['Received Dt'].strftime('%b %d, %Y')
    if len(filtered_balance) > 1:
        prev_balance_amount = round(filtered_balance.iloc[1]['Balance'])
        balance_diff = round(last_balance_amount - prev_balance_amount)
        st.metric(label=f"Latest Balance (filed on {last_balance_date})", value='${:,}'.format(last_balance_amount), delta='{:,}'.format(balance_diff))
    else:
        # Only one filing on record: no previous value to diff against.
        st.metric(label=f"Latest Balance (filed on {last_balance_date})", value='${:,}'.format(last_balance_amount))
def display_balance_data(filername, filtered_balance):
    """Render the balance table inside an expander, or warn when no data exists."""
    if len(filtered_balance) == 0:
        st.warning('No balance data to display')
        return
    with st.expander(f'Balance'):
        st.info('Balance is calculated as: (Total Contributions + Total Unitemized Contributions + Total Contributions Mantained + Total Interest & Income Earned) \
        - (Total Expenditures + Total Unitemized Expenditures + Total Outstanding Loans + Total Unitemized Loans)')
        display_download_button(filtered_balance, f"Download balance data", f"balance_{filername}.csv")
        st.dataframe(filtered_balance)
def display_stats(dtype):
    """Display year-over-year summary metrics for one data type.

    ``dtype`` is [var, prefix, var_short, filtered_data] (the DataFrame was
    appended by the caller). Shows average monthly amount/count for the latest
    year with deltas versus the prior year, plus the largest transaction's
    counterparty.

    NOTE(review): when the prior year is absent from the data no metrics are
    rendered at all — confirm this is intended rather than a fallback display.
    """
    # Load dtype vars
    var, prefix, filtered_data = dtype[0], dtype[1], dtype[3]
    if len(filtered_data) > 0:
        # Get stats
        this_year = filtered_data.year.max()
        if this_year - 1 in filtered_data.year.unique():
            # Month-end buckets, kept per calendar year for the YoY comparison.
            monthly = filtered_data.groupby([pd.Grouper(key=f'{var} Dt', freq='M'), 'year'])\
                .agg(amount = (f'{var} Amount', 'sum'), count = (f'{var} Amount', 'count')).reset_index().fillna(0)
            avg_amount_this_year = round(monthly[monthly.year == this_year]['amount'].mean())
            avg_amount_last_year = round(monthly[monthly.year == this_year - 1]['amount'].mean())
            avg_amount_diff = round(avg_amount_this_year - avg_amount_last_year)
            avg_count_this_year = round(monthly[monthly.year == this_year]['count'].mean())
            avg_count_last_year = round(monthly[monthly.year == this_year - 1]['count'].mean())
            avg_count_diff = round(avg_count_this_year - avg_count_last_year)
            # Single largest transaction of the latest year.
            top_contrib_this_year = filtered_data[filtered_data.year == this_year].sort_values(f'{var} Amount', ascending=False).iloc[0]
            top_contrib_this_year_name = top_contrib_this_year[f'{prefix} Name']
            top_contrib_this_year_info = 'Located in ' + str(top_contrib_this_year[f'{prefix} Location'])
            try:
                # Prefer employer info when present; the Employer column does
                # not exist for every data type, hence the try/except.
                if top_contrib_this_year[f'{prefix} Employer'] != '':
                    top_contrib_this_year_info = 'Employed by ' + str(top_contrib_this_year[f'{prefix} Employer']) + ' in ' + str(top_contrib_this_year[f'{prefix} Location'])
            except:
                pass
            # Display stats
            col1, col2, col3 = st.columns(3)
            col1.metric(label=f"Avg Monthly {var} Amount ({this_year})", value='${:,}'.format(avg_amount_this_year), delta='{:,}'.format(avg_amount_diff))
            col2.metric(label=f"Avg Monthly {var} Count ({this_year})", value='{:,}'.format(avg_count_this_year), delta='{:,}'.format(avg_count_diff))
            col3.metric(label=f'Top {prefix} ({this_year})', value=top_contrib_this_year_name)
            # col3.markdown(top_contrib_this_year_info)
            col3.markdown(f'<p style="position: relative; top:-20px; color:grey">{top_contrib_this_year_info}</p>', unsafe_allow_html=True)
def group_data(var, prefix, filtered_data):
    """Aggregate rows per unique counterparty and pivot amounts/counts by year.

    Parameters
    ----------
    var : str
        Data type name (e.g. 'Contribution') used to locate the date/amount columns.
    prefix : str
        Counterparty column prefix (e.g. 'Contributor').
    filtered_data : pandas.DataFrame
        Output of filter_data. NOTE: mutated in place (NaNs filled, all-empty
        columns dropped) — same behavior as the original implementation.

    Returns
    -------
    pandas.DataFrame
        Counterparty-indexed pivot with (amount, year) and (count, year)
        columns, sorted by the last column descending.

    Bug fix: the original dropped columns from ``filtered_data`` while
    iterating over ``filtered_data.columns``; candidate columns are now
    collected first and dropped in one call (same columns removed).
    """
    filtered_data.fillna('', inplace=True)
    filtered_data[f'{var} Dt'] = pd.to_datetime(filtered_data[f'{var} Dt'])
    # Keep only the most recent five years of data.
    filtered_data = filtered_data[filtered_data[f'{var} Dt'].dt.year >= 2017]
    # Drop columns that contain nothing but empty strings.
    empty_cols = [col for col in filtered_data.columns
                  if list(filtered_data[col].unique()) == ['']]
    if empty_cols:
        filtered_data.drop(columns=empty_cols, inplace=True)
    grouped = filtered_data.groupby([col for col in filtered_data.columns if prefix in col or col == 'year'])\
        .agg(count = ('Report Info Ident', 'count'), amount = (f'{var} Amount', 'sum'))\
        .reset_index()\
        .rename(columns={'count': var + 's', 'amount':f'{var} Amount' })\
        .pivot(index=[col for col in filtered_data.columns if prefix in col], columns='year', values=[f'{var} Amount', var + 's'])\
        .fillna(0)
    if len(grouped) > 0:
        # Sort by the most recent year's count column (last pivot column).
        grouped.sort_values(grouped.columns[-1], ascending=False, inplace=True)
    return grouped
def display_data(dtype, filername):
    """Render the raw rows and the per-counterparty summary for one data type.

    ``dtype`` is [var, prefix, var_short, filtered_data]; ``filername`` is
    used only in the download file names. Shows two tables inside one
    expander: all rows within the date range, and the grouped
    unique-counterparty pivot.
    """
    # Load dtype vars
    var, prefix, var_short, filtered_data = dtype[0], dtype[1], dtype[2], dtype[3]
    # Display data table
    if len(filtered_data) > 0:
        count = len(filtered_data)
        count_str = '{:,}'.format(count)
        # NOTE: group_data mutates filtered_data (fillna / empty-column drops)
        # before the raw rows are displayed below.
        grouped_data = group_data(var, prefix, filtered_data)
        grouped_count = len(grouped_data)
        grouped_count_str = '{:,}'.format(grouped_count)
        # Drop rows with missing dates, then compute the displayed date range.
        for col in [f'{var} Dt', 'Received Dt']:
            filtered_data[col] = pd.to_datetime(filtered_data[col])
            filtered_data = filtered_data[filtered_data[col].isna() == False]
        date_min, date_max = filtered_data[f'{var} Dt'].min().strftime(format='%b %d, %Y'), filtered_data[f'{var} Dt'].max().strftime(format='%b %d, %Y')
        # Format the dates as plain strings for display/download.
        for col in [f'{var} Dt', 'Received Dt']:
            filtered_data[col] = filtered_data[col].apply(lambda x: x.strftime(format='%Y-%m-%d'))
        for col in [col for col in filtered_data.columns if prefix in col]:
            filtered_data[col] = filtered_data[col].fillna('')
        with st.expander(f'{var}s'):
            # Data
            st.markdown(f'**All {var}s**')
            # Singular/plural and range/single-day phrasing for the caption.
            if date_min != date_max:
                st.markdown(f'{count_str} {var.lower()}s between {date_min} and {date_max}')
            elif count == 1:
                st.markdown(f'{count_str} {var.lower()} on {date_min}')
            else:
                st.markdown(f'{count_str} {var.lower()}s on {date_min}')
            display_download_button(filtered_data.drop(columns=['year']), f"Download {var.lower()} data", f"data_{var.lower()}_{filername}.csv")
            st.dataframe(filtered_data.fillna('').drop(columns=['year']))
            # Grouped data
            st.markdown(f'**Unique {prefix}s**')
            st.markdown(f'{grouped_count_str} unique {prefix.lower()}s')
            display_download_button(grouped_data.reset_index().reset_index(drop=True), f"Download unique {prefix.lower()}s data", f"{prefix.lower()}_{filername}.csv")
            st.dataframe(grouped_data)
    else:
        st.warning(f'No {var.lower()} data to display')
def get_common(concat_dfs, dtype):
    """Pivot the combined rows down to counterparties shared by all filers.

    Returns a frame with counterparty columns plus 'Year', and one amount
    column per filer name; rows where any filer's column is NaN (i.e. the
    counterparty is not common to every filer) are removed.
    """
    var, prefix = dtype[0], dtype[1]
    # Sum amounts per (counterparty columns, filer name, year).
    grouped = concat_dfs.fillna('').groupby([col for col in concat_dfs.columns if prefix.lower() in col.lower() or col == 'Filer Name' or col == 'year'])[f'{var} Amount'].sum().reset_index()
    name_cols = list(grouped['Filer Name'].unique())
    # One column per filer; NaN marks "no transactions with this party".
    common = grouped.pivot(index=[col for col in concat_dfs.columns if prefix.lower() in col.lower() or col == 'year'], columns='Filer Name', values=f'{var} Amount').reset_index()
    common.rename(columns={'year': 'Year'}, inplace=True)
    # Keep only parties that appear for every filer.
    for col in name_cols:
        common = common[common[col].isna() == False]
    # Drop columns containing nothing but empty strings.
    # NOTE(review): this drops from `common` while iterating common.columns —
    # works because the iterated Index is a snapshot, but is fragile.
    for col in common.columns:
        if list(common[col].unique()) == ['']:
            common.drop(columns=[col], inplace=True)
    common.reset_index(drop=True, inplace=True)
    common.sort_values('Year', ascending=False, inplace=True)
    return common
def display_common(concat_dfs, dtype, names):
    """Render the table of counterparties shared by every selected filer."""
    var, prefix = dtype[0], dtype[1]
    st.markdown(f'**Shared {prefix}s**')
    common = get_common(concat_dfs, dtype)
    if len(common) == 0:
        st.write(f'No shared {prefix.lower()}s to display')
        return
    names_forfile = '_'.join(names)
    display_download_button(common, f"Download shared {prefix.lower()}s data", f"shared_{prefix.lower()}s_{names_forfile}.csv")
    st.dataframe(common)
def make_line_chart(data):
    """Build an interactive Altair line chart from long-form monthly totals.

    Expects columns in order: series name, date, value. Adds hover behavior:
    a vertical rule with a tooltip listing every series' value at the
    hovered month.
    """
    name_col = data.columns[0]
    date_col = data.columns[1]
    value_col = data.columns[2]
    data[value_col] = data[value_col].astype(float).round(1)
    # Sanitize series names (punctuation / parentheticals) so they are valid
    # tooltip field names.
    # NOTE(review): the first .replace swaps a space-like character for a
    # space — presumably a non-breaking space in the original source; confirm.
    data[name_col] = data[name_col].apply(lambda x: x.replace(' ', ' ').replace(',', '').replace('.', ' ')).apply(lambda x: re.sub("([\(\[]).*?([\)\]])", "", x))
    base = alt.Chart(data).encode(
        x=alt.X(date_col, axis=alt.Axis(format = ("%b %Y")))
    )
    names = sorted(data[name_col].unique())
    # One currency-formatted tooltip per series, preceded by the date.
    tooltips = [alt.Tooltip(c, type='quantitative', title=c, format="$,.2f") for c in names]
    tooltips.insert(0, alt.Tooltip(date_col, title=date_col))
    # Nearest-point hover selection keyed on the date.
    selection = alt.selection_single(
        fields=[date_col], nearest=True, on='mouseover', empty='none', clear='mouseout'
    )
    lines = base.mark_line(interpolate='catmull-rom').encode(
        y=alt.Y(value_col, title='', axis=alt.Axis(format="$~s")),
        color=alt.Color(name_col, legend=alt.Legend()))
    points = lines.mark_point().transform_filter(selection)
    # Invisible-until-hover vertical rule carrying the combined tooltip.
    rule = base.transform_pivot(
        pivot=name_col, value=value_col, groupby=[date_col]
    ).mark_rule().encode(
        opacity=alt.condition(selection, alt.value(0.3), alt.value(0)),
        tooltip=tooltips
    ).add_selection(selection)
    chart = lines + points + rule
    return chart
def get_var_totals(concat_dfs, dtype):
    """Compute per-filer monthly totals.

    Returns the long-form frame used for charting and the wide (one column
    per filer) frame offered for download.
    """
    var = dtype[0]
    date_col = f'{var} Dt'
    amount_col = f'{var} Amount'
    concat_dfs[date_col] = pd.to_datetime(concat_dfs[date_col])
    monthly = (
        concat_dfs
        .groupby(['Filer Name', pd.Grouper(key=date_col, freq='m')])[amount_col]
        .sum()
        .reset_index()
    )
    monthly[date_col] = pd.to_datetime(monthly[date_col])
    wide = monthly.pivot(index=date_col, columns='Filer Name', values=amount_col).reset_index()
    return monthly, wide
def display_var_totals_chart(dtype, concat_dfs, names):
    """Plot the monthly-totals comparison chart and offer its data for download."""
    var = dtype[0]
    names_forfile = '_'.join(names)
    chart_data, for_download = get_var_totals(concat_dfs, dtype)
    st.markdown(f'**{var} Monthly Totals**')
    display_download_button(for_download, f"Download {var.lower()} monthly totals data", f"monthly_totals_{var.lower()}_{names_forfile}.csv")
    st.altair_chart(make_line_chart(chart_data), use_container_width=True)
#____________________________________________________________ GET FILER DATA ____________________________________________________________
def get_filer_data():
    """Top-level UI flow: pick filers, then render each filer's finance sections.

    For every selected filer: loads contributions/expenditures/loans plus
    balance data, shows summary metrics, the filer-information table and the
    data expanders, then hands the collected frames to compare_filers().
    """
    # Load filers
    filers = load_filers()
    # Display filters
    filertypeW = st.multiselect(options=['INDIVIDUAL', 'ENTITY'],
                                label='Select one or more filer types')
    filernameW = st.multiselect(options=list(filers[filers['Filer Persent Type'].isin(filertypeW)]['Filer Name'].unique()),
                                label='Select one or more filers by name', help="Begin tying the entity's name or the filer's LAST NAME to view options")
    if len(filernameW) == 1:
        st.info('Add another filer to generate comparison data.')
    # (var, DataFrame) pairs collected across filers for the comparison step.
    data = []
    # Display filertable
    for filername in filernameW:
        ids = list(filers[filers['Filer Name'] == filername]['Filer Ident'].unique())
        # Redefining dtypes
        # Fresh per-filer copies because the filtered frames get appended below.
        contribs=['Contribution', 'Contributor', 'contribs']
        expend = ['Expenditure', 'Payee', 'expend']
        loans = ['Loan', 'Lender', 'loans']
        dtypes = [contribs, expend, loans]
        # Filter data
        with st.spinner('Loading data...'):
            for dtype in dtypes:
                filtered_data = filter_data(ids, filername, dtype)
                dtype.append(filtered_data) ## dtype format now [var, prefix, varshort, filtered_data]
            # Balance data
            filtered_balance = filter_balance(ids)
        st.markdown(f'### {filername}')
        # Display balance stats
        display_balance_stats(filtered_balance)
        # Display stats for each dtype
        for dtype in dtypes:
            if len(dtype[3]) > 0: # filtered_data is dataframe
                data.append([dtype[0], dtype[3]])
                display_stats(dtype)
        # Display filertable
        filertable = filers[filers['Filer Name'] == filername].reset_index(drop=True).dropna(how='all', axis=1)
        display_filertable(filertable)
        # Display balance data
        display_balance_data(filername, filtered_balance)
        # Display data
        for dtype in dtypes:
            display_data(dtype, filername)
    compare_filers(filernameW, data, filers)
#____________________________________________________________ COMPARE FILERS ____________________________________________________________
def compare_filers(filernameW, data, filers):
    """Render the cross-filer comparison section (charts + shared counterparties).

    NOTE(review): iterates the module-level ``dtypes`` (fresh 3-element
    descriptors), not the per-filer lists rebuilt in get_filer_data — only
    dtype[0] (the type name) is read here, so it works; confirm intended.
    """
    if len(filernameW) > 1:
        st.markdown("""---""")
        st.markdown('<p style="background-color: #ff4c4c; text-align: center; color:white; font-size: 20px; width:100%;"><b>FILER COMPARISON</b></p>', unsafe_allow_html=True)
        for dtype in dtypes:
            dfs = [el[1] for el in data if el[0] == dtype[0]]
            if len(dfs) > 0:
                concat_dfs = pd.concat(dfs)
                # Need enough rows and at least two distinct filers to compare.
                if len(concat_dfs) >= 10 and concat_dfs['Filer Name'].nunique() > 1:
                    # Making main vars
                    # File-name-safe versions of the filer names.
                    names = [re.sub("([\(\[]).*?([\)\]])", "", name.replace(',', '').replace('.', '').replace(' ', '-')).strip() for name in list(concat_dfs['Filer Name'].unique())]
                    with st.expander(f'Compare {dtype[0]}s'):
                        # Display chart
                        display_var_totals_chart(dtype, concat_dfs, names)
                        # Display common
                        display_common(concat_dfs, dtype, names)
                else:
                    st.warning(f'Insufficient data to compare {dtype[0].lower()}s')
            else:
                st.warning(f'Insufficient data to compare {dtype[0].lower()}s')
def main():
    """Page entry point: header, last-update banner, intro copy, then the tool."""
    st.image('https://upload.wikimedia.org/wikipedia/commons/d/d9/Austin_American-Statesman_%282019-10-31%29.svg', width=300)
    st.title('Campaign Finance Data Tool')
    # The last-update timestamp is maintained as a plain text file on S3.
    with request.urlopen('https://data-statesman.s3.amazonaws.com/tec-campaign-finance/documentation/last_update.txt') as f:
        last_update = f.read().decode('utf-8')
    st.markdown(f'##### Last update: {last_update}')
    st.markdown("""
    This application processes and visualizes the **last five years** of campaign finance data released by the Texas Ethics Commission.
    The raw data is available for download [here](https://www.ethics.state.tx.us/data/search/cf/CFS-ReadMe.txt). According to your selection of filers,
    the application will display balance, contribution, expenditure and loan data for each filer
    and then compare totals for the filers you selected.
    To begin, select one or more filers by **TYPE** and **NAME**.
    """)
    get_filer_data()
    st.markdown(footer,unsafe_allow_html=True)
# Run the app when executed as a script (via `streamlit run`).
if __name__ == '__main__':
    main()
{
"api_name": "streamlit.set_page_config",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "streamlit.download_button",
"line_number": 69,
"usage_type": "call"
},
{
"api_name":... |
39240946152 | """
Writes MESS input for a monte carlo partition function calculation
"""
import os
from mako.template import Template
from mess_io.writer import util
# OBTAIN THE PATH TO THE DIRECTORY CONTAINING THE TEMPLATES #
SRC_PATH = os.path.dirname(os.path.realpath(__file__))
TEMPLATE_PATH = os.path.join(SRC_PATH, 'templates')
SECTION_PATH = os.path.join(TEMPLATE_PATH, 'sections')
MONTE_CARLO_PATH = os.path.join(SECTION_PATH, 'monte_carlo')
def monte_carlo(geom, formula,
                flux_mode_str, data_file_name,
                ground_energy, reference_energy):
    """Build the MESS input string for a MonteCarlo species section.

    Renders the 'monte_carlo.mako' template with the molecule specification
    derived from *geom*, the indented flux-mode block, the sampling data file
    name, and the ground/reference energies.
    """
    # Molecule specification (atom count plus formatted atom list).
    natoms, atom_list = util.molec_spec_format(geom)
    # Template context; the flux-mode block is indented to nest in the section.
    context = {
        'formula': formula,
        'natoms': natoms,
        'atom_list': atom_list,
        'flux_mode_str': util.indent(flux_mode_str, 4),
        'data_file_name': data_file_name,
        'ground_energy': ground_energy,
        'reference_energy': reference_energy
    }
    template_path = os.path.join(MONTE_CARLO_PATH, 'monte_carlo.mako')
    return Template(filename=template_path).render(**context)
def fluxional_mode(atom_indices, span=360.0):
    """Build the string describing a single fluxional mode.

    *atom_indices* are formatted through the util helper; *span* is the
    rotation range in degrees (default 360.0).
    """
    context = {
        'atom_indices': util.format_flux_mode_indices(atom_indices),
        'span': span,
    }
    template_path = os.path.join(MONTE_CARLO_PATH, 'fluxional_mode.mako')
    return Template(filename=template_path).render(**context)
| sjklipp/interfaces_1219 | mess_io/writer/monte_carlo.py | monte_carlo.py | py | 2,141 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"lin... |
35026081994 | import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-notebook')
import re
import string
from nltk.stem import WordNetLemmatizer
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras import Sequential, Input
from tensorflow.keras.layers import Dense, Bidirectional, LSTM, Dropout, Embedding
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
data = pd.read_csv('Data/train.csv',encoding = "ISO-8859-1")
print(data['Sentiment'].value_counts()/len(data))
punctuation = string.punctuation
def clean(sentence, charset=string.punctuation):
    """Normalize a tweet: lowercase, strip mentions, URLs, punctuation, digits.

    Parameters
    ----------
    sentence : str
        Raw tweet text.
    charset : str, optional
        Characters removed from the text. Defaults to ASCII punctuation
        (the module-level behavior of the original); exposed as a parameter
        so callers can strip a different character set.

    Returns
    -------
    str
        Cleaned, whitespace-normalized, lower-cased text.
    """
    sentence = sentence.lower()
    sentence = sentence.strip('#')                 # leading/trailing hashtag marks
    sentence = re.sub(r'@\w+', '', sentence)       # @mentions
    sentence = re.sub(r'http\S+', '', sentence)    # URLs
    remove = set(charset)
    sentence = ''.join(ch for ch in sentence if ch not in remove)
    sentence = ''.join(ch for ch in sentence if not ch.isdigit())
    return ' '.join(sentence.split())
data['SentimentText'] = data['SentimentText'].apply(clean)
WNL = WordNetLemmatizer()
def lemma(sentence):
    """Lemmatize every whitespace-separated token using the shared WordNet lemmatizer."""
    return ' '.join(WNL.lemmatize(token) for token in sentence.split())
data['SentimentText'] = data['SentimentText'].apply(lemma)
X_train, X_val, y_train, y_val = train_test_split(data['SentimentText'], data['Sentiment'], test_size=0.1, random_state=37)
print('# Train data samples:', X_train.shape[0])
print('# Validation data samples:', X_val.shape[0])
assert X_train.shape[0] == y_train.shape[0]
assert X_val.shape[0] == y_val.shape[0]
# ---- Tokenization -------------------------------------------------------
NB_WORDS = 10000 # Parameter indicating the number of words we'll put in the dictionary
tk = Tokenizer(num_words=NB_WORDS,
               filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
               lower=True,
               split=" ")
# Fit the vocabulary on the training split only (no validation leakage).
tk.fit_on_texts(X_train)
X_train_seq = tk.texts_to_sequences(X_train)
X_val_seq = tk.texts_to_sequences(X_val)
# Pad every sequence to the length of the longest tweet in the corpus.
seq_lengths = data['SentimentText'].apply(lambda x: len(x.split(' ')))
MAX_LEN = seq_lengths.max()
X_train_seq_trunc = pad_sequences(X_train_seq, maxlen=MAX_LEN)
X_val_seq_trunc = pad_sequences(X_val_seq, maxlen=MAX_LEN)
# ---- Model: embedding -> bidirectional LSTM -> dropout -> sigmoid -------
model = Sequential()
model.add(Embedding(NB_WORDS, 8, input_length=MAX_LEN))
model.add(Bidirectional(LSTM(16)))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
print('Shape of the data Train, Validation on X,y')
print((X_train_seq_trunc.shape, y_train.shape), (X_val_seq_trunc.shape,y_val.shape))
history = model.fit(X_train_seq_trunc, y_train,
                    epochs=10,
                    batch_size=512,
                    validation_data=(X_val_seq_trunc,y_val))
def eval_metric(history, metric_name):
    """Plot train vs. validation curves for one metric of a Keras History.

    Parameters
    ----------
    history : keras History (any object with a ``.history`` dict)
    metric_name : str
        Key in ``history.history``; the matching 'val_' key is plotted too.

    Bug fix: the epoch axis was hard-coded to 10 epochs; it now follows the
    actual number of recorded values, so the plot is correct for any
    training length.
    """
    metric = history.history[metric_name]
    val_metric = history.history['val_' + metric_name]
    epochs = range(1, len(metric) + 1)
    plt.plot(epochs, metric, 'bo', label='Train ' + metric_name)
    plt.plot(epochs, val_metric, 'b', label='Validation ' + metric_name)
    plt.legend()
    plt.show()
# NOTE(review): with tf.keras 2.x the history key for metrics=['accuracy'] is
# 'accuracy', not 'acc' — confirm against the installed TF version.
eval_metric(history, 'acc')
eval_metric(history, 'loss')
''' It seems that our builded model is not good enough but no the problem is with embeddings so to improve i suggest folow the following steps:-
1. Download the glove model from (https://nlp.stanford.edu/projects/glove/) i.e glove.twitter.27B.zip
2. Extract the txt file with 100 dimensions (It will be enough and efficient more than 1000 dimension of ours) glove.twitter.27B.100d.txt
3. Place txt file with your code and uncomment the following lines of code
'''
'''
glove_file = 'glove.twitter.27B.' + str(GLOVE_DIM) + 'd.txt'
emb_dict = {}
glove = open(input_path / glove_file)
for line in glove:
values = line.split()
word = values[0]
vector = np.asarray(values[1:], dtype='float32')
emb_dict[word] = vector
glove.close()
'''
#To feed this into an Embedding layer, we need to build a matrix containing the words in the tweets and their representative word embedding.
#So this matrix will be of shape (NB_WORDS=1000, GLOVE_DIM=100)
'''
emb_matrix = np.zeros((NB_WORDS, GLOVE_DIM))
for w, i in tk.word_index.items(): #tk is tokenized from previous model
# The word_index contains a token for all words of the training data so we need to limit that
if i < NB_WORDS:
vect = emb_dict.get(w)
# Check if the word from the training data occurs in the GloVe word embeddings
# Otherwise the vector is kept with only zeros
if vect is not None:
emb_matrix[i] = vect
else:
break
'''
# All the parameters contain same meaning from previous apply on same model but with new GLOVE Dimension
'''
model = Sequential()
model.add(Embedding(NB_WORDS, GLOVE_DIM, input_length=MAX_LEN))
model.add(Bidirectional(LSTM(16)))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.layers[0].set_weights([emb_matrix])
model.layers[0].trainable = False
model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train_seq_trunc, y_train,
epochs=10,
batch_size=512,
validation_data=(X_val_seq_trunc,y_val))
'''
# You would definetly get a more accurate version of same model with Glove (word vector representation)
| Ravi-Maurya/Natural_Language_Processing | TwitterClassification/TwitterClassification.py | TwitterClassification.py | py | 5,410 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 7,
"usage_type": "attribute"
},
{
"api... |
26540694817 | import math
from PyQt5.QtWidgets import QApplication, QLabel, QLineEdit, QPushButton, QVBoxLayout, QWidget, QCheckBox
class Quader():
    """Rectangular box (cuboid) with integer edge lengths."""

    def __init__(self, länge, breite, höhe):
        # Edge lengths are coerced to int so string input from the UI works.
        self.A = int(länge)
        self.B = int(breite)
        self.C = int(höhe)

    def __str__(self):
        lines = [
            "----------------------- ",
            "Seite A: " + str(self.A),
            "Seite B: " + str(self.B),
            "Seite C: " + str(self.C),
            "Volumen: " + str(self.getVolume()),
        ]
        return "\n".join(lines)

    def getVolume(self):
        """Return the volume A * B * C."""
        return self.A * self.B * self.C
class Zylinder():
    """Right circular cylinder with integer radius and height."""

    def __init__(self, breite, höhe):
        self.r = int(breite)
        self.h = int(höhe)

    def __str__(self):
        return (
            "----------------------- \n"
            f"Radius: {self.r}\n"
            f"Höhe: {self.h}\n"
            f"Volumen: {self.getVolume()}"
        )

    def getVolume(self):
        """Return the volume r^2 * pi * h."""
        return (self.r * self.r * math.pi) * self.h
class Pyramide():
    """Square pyramid with integer base edge and height."""

    def __init__(self, breite, höhe):
        self.A = int(breite)
        self.h = int(höhe)

    def __str__(self):
        return (
            "----------------------- \n"
            f"Seitenlänge: {self.A}\n"
            f"Höhe: {self.h}\n"
            f"Volumen: {self.getVolume()}"
        )

    def getVolume(self):
        """Return the volume (1/3) * A^2 * h."""
        return 1/3 * self.A * self.A * self.h
class UserInterface():
    """Qt window that collects dimensions and shows the selected solids' volumes."""

    def qtBox(self):
        """Build the widget tree, wire the buttons, and run the Qt event loop."""
        self.app = QApplication([])
        self.window = QWidget()
        self.layout = QVBoxLayout()
        # One label plus one input field (LineEdit) per dimension.
        self.layout.addWidget(QLabel("Länge: "))
        self.längeLE = QLineEdit()
        self.layout.addWidget(self.längeLE)
        self.layout.addWidget(QLabel("Breite/Radius: "))
        self.breiteLE = QLineEdit()
        self.layout.addWidget(self.breiteLE)
        self.layout.addWidget(QLabel("Höhe: "))
        self.höheLE = QLineEdit()
        self.layout.addWidget(self.höheLE)
        # Three checkboxes to choose between cuboid, cylinder and pyramid.
        self.quaderC = QCheckBox("Quader")
        self.layout.addWidget(self.quaderC)
        self.zylinderC = QCheckBox("Zylinder")
        self.layout.addWidget(self.zylinderC)
        self.pyramideC = QCheckBox("Pyramide")
        self.layout.addWidget(self.pyramideC)
        self.button = QPushButton("Los!")
        self.layout.addWidget(self.button)
        # Three result labels, filled via setText() in berrechnen().
        self.txt1 = QLabel()
        self.layout.addWidget(self.txt1)
        self.txt2 = QLabel()
        self.layout.addWidget(self.txt2)
        self.txt3 = QLabel()
        self.layout.addWidget(self.txt3)
        self.buttonQuit = QPushButton("Schließen")
        self.layout.addWidget(self.buttonQuit)
        def button_quit():
            # Quit the application event loop.
            self.app.quit()
        def on_button_clicked():
            # Clear previous results, then recompute via berrechnen().
            self.txt1.setText("")
            self.txt2.setText("")
            self.txt3.setText("")
            self.berrechnen()
        self.button.clicked.connect(on_button_clicked)
        self.buttonQuit.clicked.connect(button_quit)
        # Attach the layout built above to the window and start the app.
        self.window.setLayout(self.layout)
        self.window.show()
        self.app.exec()

    def berrechnen(self):
        """Compute the volume of every checked solid and show it in its label."""
        if self.quaderC.isChecked():
            myQuader = Quader(self.längeLE.text(), self.breiteLE.text(), self.höheLE.text())
            self.txt1.setText(str(myQuader.getVolume()))
        if self.zylinderC.isChecked():
            myZylinder = Zylinder(self.breiteLE.text(), self.höheLE.text())
            self.txt2.setText(str(myZylinder.getVolume()))
        if self.pyramideC.isChecked():
            myPyramide = Pyramide(self.breiteLE.text(), self.höheLE.text())
            self.txt3.setText(str(myPyramide.getVolume()))

    def __init__(self):
        # No state to initialize; all widgets are created in qtBox().
        pass
# Script entry point: build the UI and start the Qt event loop.
start = UserInterface()
start.qtBox()
| su595/MyPython | math programs/volumenRechner.py | volumenRechner.py | py | 4,416 | python | de | code | 2 | github-code | 36 | [
{
"api_name": "math.pi",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "PyQ... |
73742845865 | import logging
import json
import os
from common import utils
from table_order.table_order_item_list import TableOrderItemList
# Read the desired log verbosity from the environment.
LOGGER_LEVEL = os.environ.get("LOGGER_LEVEL")

# Logger setup: honour LOGGER_LEVEL ('DEBUG'), defaulting to INFO.
# Bug fix: a duplicated setup block below unconditionally reset the level to
# INFO, which made the DEBUG environment setting ineffective.
logger = logging.getLogger()
if LOGGER_LEVEL == 'DEBUG':
    logger.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)
# テーブル操作クラスの初期化
item_master_table_controller = TableOrderItemList()
def get_category():
    """Return the list of item categories (category id and name only).

    Scans the item-master table and strips each record's ``items`` payload
    so only category-level fields remain.

    Returns
    -------
    list of dict
        Category records without their item lists.
    """
    category_records = item_master_table_controller.scan()
    for record in category_records:
        record.pop('items')
    return category_records
def lambda_handler(event, context):
    """Lambda entry point returning the product-category listing.

    Parameters
    ----------
    event : dict
        Parameters passed from the front end (logged for tracing).
    context : dict
        Lambda context object (unused).

    Returns
    -------
    categories : dict
        Success response wrapping the category list as JSON, or an
        error response when the table scan fails.
    """
    # Trace the raw invocation parameters.
    logger.info(event)
    try:
        categories = get_category()
    except Exception as e:
        logger.error('Occur Exception: %s', e)
        return utils.create_error_response('ERROR')
    body = json.dumps(categories, default=utils.decimal_to_int,
                      ensure_ascii=False)
    return utils.create_success_response(body)
| line/line-api-use-case-table-order | backend/APP/category_get/category_get.py | category_get.py | py | 1,543 | python | ja | code | 13 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"li... |
38565479182 | from gluonts.dataset.util import to_pandas
import logging
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from gluonts.dataset.field_names import FieldName
import os
import numpy as np
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
TARGET_DIM = 4
def time_format_from_frequency_str(freq_str: str):
    """Return a timestamp format string suited to the given frequency.

    Parameters
    ----------
    freq_str
        Frequency string of the form [multiple][granularity] such as
        "12H", "5min", "1D" (a pandas offset alias).

    Returns
    -------
    str or None
        A ``strftime`` pattern with just enough resolution for the
        frequency, or ``None`` for unsupported offset types (seconds
        and below, quarters, business month/year variants, ...).
    """
    # Offset classes are looked up by name so this works across pandas
    # versions: YearOffset/MonthOffset were removed from the public
    # pandas.tseries.offsets namespace in pandas >= 1.0 and are covered
    # by their Begin/End subclasses instead.
    format_by_offset_names = [
        (("YearOffset", "YearBegin", "YearEnd"), '%Y'),
        (("MonthOffset", "MonthBegin", "MonthEnd"), '%Y-%m'),
        (("Week",), '%Y-%W'),
        (("Day",), '%Y-%m-%d'),
        (("BusinessDay",), '%Y-%m-%d'),
        (("Hour",), '%Y-%m-%d %H:%M:%S'),
        (("Minute",), '%Y-%m-%d %H:%M:%S'),
    ]
    offset = to_offset(freq_str)
    for names, format_pattern in format_by_offset_names:
        types = tuple(
            getattr(offsets, name) for name in names if hasattr(offsets, name)
        )
        if types and isinstance(offset, types):
            return format_pattern
    # Unsupported granularity: preserve the original implicit-None contract.
    return None
def create_dataset_if_not_exist(paths, start, past_length, pred_length, slice, timestep, freq):
    """Run the preprocessing script for every dataset that is missing.

    Parameters
    ----------
    paths : dict
        Mapping of dataset name -> expected output path.
    The remaining arguments are forwarded verbatim to
    ``data_process/preprocessing.py``.
    """
    for name, path in paths.items():
        if os.path.exists(path):
            logging.info(' dataset [%s] was found , good~~~' % name)
            continue
        logging.info('there is no dataset [%s] , creating...' % name)
        command = (
            'python data_process/preprocessing.py -st="{}" -d="{}" -t="{}"'
            ' -p="{}" -s="{}" -n="{}" -f="{}"'.format(
                start, name, past_length, pred_length, slice, timestep, freq)
        )
        os.system(command)
def add_time_mark_to_file(path):
    """Return a non-clashing variant of *path*.

    If *path* does not exist it is returned unchanged; otherwise a
    numeric suffix ``_1``, ``_2``, ... is appended to the file stem
    until a free name is found.
    """
    if not os.path.exists(path):
        return path
    directory, base = os.path.split(path)
    stem, ext = os.path.splitext(base)
    suffix = 1
    candidate = os.path.join(directory, '%s_%d%s' % (stem, suffix, ext))
    while os.path.exists(candidate):
        suffix += 1
        candidate = os.path.join(directory, '%s_%d%s' % (stem, suffix, ext))
    return candidate
def add_time_mark_to_dir(path):
    """Return a non-clashing variant of directory *path*.

    Mirrors add_time_mark_to_file: an existing directory name gets a
    ``_1``, ``_2``, ... suffix until a free name is found.
    """
    if not os.path.exists(path):
        return path
    parent, dir_name = os.path.split(path)
    suffix = 1
    candidate = os.path.join(parent, '%s_%d' % (dir_name, suffix))
    while os.path.exists(candidate):
        suffix += 1
        candidate = os.path.join(parent, '%s_%d' % (dir_name, suffix))
    return candidate
def weighted_average(
    metrics, weights=None, axis=None
):
    """Compute a (weighted) mean of *metrics* along *axis*.

    metrics : Tensor, (ssm_num, bs, seq_length)
    weights : Tensor of the same shape, or None
    axis    : reduction axis

    With weights, the weighted sum is divided by the sum of weights
    clamped to at least 1.0, so all-zero weights cannot divide by
    zero; without weights this is a plain mean.
    """
    import tensorflow as tf
    if weights is None:
        return tf.math.reduce_mean(metrics, axis=axis)
    weighted = metrics * weights
    denominator = tf.math.maximum(1.0, tf.math.reduce_sum(weights, axis=axis))
    return tf.math.reduce_sum(weighted, axis=axis) / denominator
def sample_without_put_back(samples, size):
    """Draw *size* elements from *samples* uniformly, without replacement.

    Each draw removes the chosen element before the next draw, so the
    result contains no duplicates (assuming *samples* has none).
    """
    chosen = []
    remaining = samples
    for _ in range(size):
        pick = np.random.choice(remaining, 1)[0]
        chosen.append(pick)
        remaining = np.delete(remaining, np.where(remaining == pick))
    return chosen
def plot_train_pred(path, targets_name, data, pred, batch, epoch, plot_num, plot_length, time_start, freq, nan_data=0):
    '''
    Plot truth vs. prediction for a random subset of samples and save one
    image per (ssm, sample) pair under <path>/train_pred_pic/epoch(<epoch>)/.

    :param path: path to store picture
    :param targets_name: dataset name (comma-separated, one entry per ssm)
    :param data: real dataset #(ssm_num , bs , seq , 1)
    :param nan_data: fill value that marks missing data (re-masked as NaN)
    :param pred: predictions, same shape as *data*
    :param batch: batch number (embedded in the file name)
    :param epoch: epoch number (embedded in the directory name)
    :param plot_num: picture number (random choice without replacement)
    :param plot_length: number of trailing time steps shown
    :param time_start : start time of each sample's series
    :param freq : frequency of the dataset
    :return: None (writes image files as a side effect)
    '''
    # Drop the trailing singleton feature dimension.
    data = np.squeeze(data, -1)
    pred = np.squeeze(pred, -1)
    root_path = os.path.join(path, 'train_pred_pic')
    # Cannot plot more samples than the batch holds.
    if plot_num > data.shape[1]:
        plot_num = data.shape[1]
    samples_no = sample_without_put_back(np.arange(data.shape[1]), plot_num)
    current_dir = os.path.join(root_path, 'epoch({})'.format(epoch))
    if not os.path.isdir(current_dir):
        os.makedirs(current_dir)
    for ssm_no in np.arange(data.shape[0]):
        for sample in samples_no:
            pic_name = os.path.join(current_dir, 'batch_no({})_dataset({})_sample({})'.format(batch, targets_name.split(',')[ssm_no], sample))
            time_range = pd.date_range(time_start[sample], periods=data.shape[2], freq=freq)
            time_range = time_range[-plot_length:]
            s1 = data[ssm_no, sample, -plot_length:]
            # Missing values were filled with nan_data; restore NaN so
            # matplotlib leaves gaps instead of plotting the fill value.
            s1[s1 == nan_data] = np.NaN
            s2 = pred[ssm_no, sample, -plot_length:]
            fig = plt.figure(figsize=(28, 21))
            ax = fig.add_subplot(1, 1, 1)
            ax.set_title('DATASET({}) EPOCHE({} prediction result)'.format(targets_name.split(',')[ssm_no], epoch), fontsize=30)
            ax.plot(time_range, s1, linestyle='-', color='tab:green', marker='D', label='Truth')
            ax.plot(time_range, s2, linestyle='-.', color='tab:blue', marker='o', label='pred_mean')
            ax.xaxis.set_tick_params(labelsize=21)
            ax.yaxis.set_tick_params(labelsize=21)
            ax.legend(prop={'size': 31}, edgecolor='red', facecolor='#e8dfdf')
            plt.savefig(pic_name)
            # Close explicitly to avoid matplotlib accumulating open figures.
            plt.close(fig)
def plot_train_pred_NoSSMnum(path, targets_name, data, pred, batch, epoch, ssm_no, plot_num, plot_length, time_start, freq, nan_data=0):
    '''
    the same as `plot_train_pred`, but without dimension SSM:
    *data*/*pred* are (bs, seq, 1) and *ssm_no* only selects the dataset
    name used in titles and file names.
    '''
    # decline extra dimension
    data = np.squeeze(data, -1)  # (bs, seq)
    pred = np.squeeze(pred, -1)
    root_path = os.path.join(path, 'train_pred_pic')
    # NOTE(review): this cap compares plot_num against data.shape[1] (the
    # sequence length) but samples are drawn from axis 0 below — confirm
    # whether data.shape[0] was intended.
    if plot_num > data.shape[1]:
        plot_num = data.shape[1]
    samples_no = sample_without_put_back(np.arange(data.shape[0]), plot_num)
    current_dir = os.path.join(root_path, 'epoch({})'.format(epoch))
    if not os.path.isdir(current_dir):
        os.makedirs(current_dir)
    for sample in samples_no:
        pic_name = os.path.join(current_dir, 'batch_no({})_ssm_no({})_sample({})'.format(batch, ssm_no, sample))
        time_range = pd.date_range(time_start[sample], periods=data.shape[1], freq=freq)
        time_range = time_range[-plot_length:]
        s1 = data[sample, -plot_length:]
        # Restore NaN at filled positions so gaps appear in the plot.
        s1[s1 == nan_data] = np.NaN
        s2 = pred[sample, -plot_length:]
        fig = plt.figure(figsize=(28, 21))
        ax = fig.add_subplot(1, 1, 1)
        ax.set_title('DATASET({}) EPOCHE({} prediction result)'.format(targets_name.split(',')[ssm_no], epoch), fontsize=30)
        ax.plot(time_range, s1, linestyle='-', color='tab:green', marker='D', label='Truth')
        ax.plot(time_range, s2, linestyle='-.', color='tab:blue', marker='o', label='prediction')
        ax.xaxis.set_tick_params(labelsize=21)
        ax.yaxis.set_tick_params(labelsize=21)
        ax.legend(prop={'size': 31}, edgecolor='red', facecolor='#e8dfdf')
        plt.savefig(pic_name)
        plt.close(fig)
    pass
def plot_train_epoch_loss(
    result: dict,
    path: str,
    time: str
):
    """Plot per-epoch training loss and save it as train_<time>.pdf.

    result : dict mapping epoch number -> loss value
    path   : output directory
    time   : timestamp string embedded in the file name
    """
    epoches = list(result.keys())
    loss = list(result.values())
    fig = plt.figure(figsize=(16, 12))
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(epoches, loss, color='tab:blue', marker='D')
    # Axis-label and title fonts.
    font = {
        'family': 'Times New Roman',
        'weight': 'normal',
        'size': 30,
    }
    title_font = {
        'family': 'Times New Roman',
        'weight': 'bold',
        'size': 40,
    }
    ax.set_xlabel('epoch(s)', font)
    ax.set_ylabel('loss(MSE / filter negative log likelihood)', font)
    ax.set_title('epoch information when training', title_font)
    ax.xaxis.set_tick_params(labelsize=21)
    ax.yaxis.set_tick_params(labelsize=21)
    plt.savefig(os.path.join(path, 'train_%s.pdf' % (time)), format="pdf")
    plt.close(fig)
def complete_batch(batch, batch_size):
    """
    complete the batch withoug enough samples

    Pads a short final batch up to *batch_size*: list fields repeat their
    last element, ndarray fields are zero-padded along the batch axis
    (axis 0, or axis 1 when the array rank equals TARGET_DIM).

    Returns (padded_batch, original_sample_count).
    """
    # NOTE: dict.copy() is shallow — the padded entries below are rebound
    # (not mutated in place) so the originals survive, but this is NOT a
    # deepcopy as the old comment claimed.
    completed_batch = batch.copy()
    if len(completed_batch[FieldName.START]) == batch_size:
        # Batch already full; nothing to pad.
        return completed_batch, batch_size
    for attr in completed_batch.keys():
        batch_value = completed_batch[attr]
        if isinstance(batch_value, list):
            batch_num = len(batch_value)
            # Repeat the final element to fill the batch.
            completed_batch[attr] = batch_value + [batch_value[-1]] * (batch_size - batch_num)
        elif isinstance(batch_value, np.ndarray):
            if len(batch_value.shape) != TARGET_DIM:
                # Batch axis first: zero-pad along axis 0.
                batch_num = batch_value.shape[0]
                complete_shape = [batch_size - batch_num] + list(batch_value.shape[1:])
                completed_batch[attr] = np.concatenate([batch_value, np.zeros(shape=complete_shape)], axis=0)
            else:
                # Rank == TARGET_DIM: batch axis is the second dimension.
                batch_num = batch_value.shape[1]
                complete_shape = [batch_value.shape[0], batch_size - batch_num] + list(batch_value.shape[2:])
                completed_batch[attr] = np.concatenate([batch_value, np.zeros(shape=complete_shape)], axis=1)
    return completed_batch, batch_num
def del_previous_model_params(path):
    """Delete TensorFlow checkpoint files (.data*/.index/.meta) under *path*.

    Silently does nothing when *path* is not a directory.
    """
    if not os.path.isdir(path):
        return
    checkpoint_suffixes = (".data-00000-of-00001", ".index", ".meta")
    for file_name in os.listdir(path):
        if file_name.endswith(checkpoint_suffixes):
            os.remove(os.path.join(path, file_name))
def get_model_params_name(path):
    """Return the base name of a checkpoint file found under *path*.

    Looks for TensorFlow checkpoint artifacts (.data*/.index/.meta) and
    returns the matching file name without its extension; returns None
    when *path* is not a directory or holds no checkpoint files.
    """
    if not os.path.isdir(path):
        return
    checkpoint_suffixes = (".data-00000-of-00001", ".index", ".meta")
    for file_name in os.listdir(path):
        if file_name.endswith(checkpoint_suffixes):
            return os.path.splitext(file_name)[0]
def samples_with_mean_cov(mean: np.ndarray, cov: np.ndarray, num_samples: int):
    '''
    Draw Gaussian samples for every (ssm, batch, step) cell.

    :param mean: (ssm_num, bs, pred, dim_z) per-cell mean vectors
    :param cov: (ssm_num, bs, pred, dim_z , dim_z) per-cell covariances
    :param num_samples: number of samples drawn per cell
    :return: samples (ssm_num ,bs,num_samples, pred, dim_z)
    '''
    # Allocate (num_samples, ssm_num, bs, pred, dim_z).  The original code
    # hard-coded 100 for the sample axis, which broke (shape mismatch on
    # assignment) for any num_samples != 100.
    result = np.zeros(shape=mean.shape)
    result = np.tile(np.expand_dims(result, 0), [num_samples] + [1] * len(mean.shape))
    for i in range(mean.shape[0]):
        for j in range(mean.shape[1]):
            for k in range(mean.shape[2]):
                draws = np.random.multivariate_normal(mean[i, j, k], cov[i, j, k], size=num_samples)
                result[:, i, j, k] = draws
    # Move the sample axis behind (ssm, bs).
    return np.transpose(result, [1, 2, 0, 3, 4])
def get_reload_hyper(path, config):
    """Parse hyper-parameters back out of a result-directory name.

    *path* is a string of '_'-separated ``abbrev(value)`` tokens, e.g.
    ``"freq(H)_epoch(10)_lr(0.001)"``.  Every recognised token is written
    onto *config* under its full attribute name; multi-valued tokens
    (LSTM layer/cell specs such as ``"T(2-64)"``) are split on '-' and
    stored as several int attributes.  Returns the updated *config*.
    """
    # Abbreviation -> full config attribute name (or list of names for
    # multi-valued tokens).
    abbre = {
        'freq': 'freq',
        'env': 'environment',
        'lags': 'maxlags',
        'past': 'past_length',
        'pred': 'pred_length',
        'u': 'dim_u',
        'l': 'dim_l',
        'K': 'K',
        'T': ['time_exact_layers', 'time_exact_cells'],
        'E': ['env_exact_layers', 'env_exact_cells'],
        'α': 'alpha_units',
        'epoch': 'epochs',
        'bs': 'batch_size',
        'bn': 'num_batches_per_epoch',
        'lr': 'learning_rate',
        'initKF': 'init_kf_matrices',
        'dropout': 'dropout_rate'
    }
    keep_as_string = ['freq', 'env', 'T', 'E']
    parse_as_float = ['lr', 'initKF', 'dropout']
    for token in path.split('_'):
        open_paren = token.index('(')
        close_paren = token.index(')')
        name = token[:open_paren]
        value = token[open_paren + 1:close_paren]
        if name in parse_as_float:
            value = float(value)
        elif name not in keep_as_string:
            value = int(value)
        target = abbre[name]
        if isinstance(target, str):
            # Single-valued parameter.
            config.__setattr__(target, value)
        else:
            # Multi-valued parameter, e.g. LSTM "layers-cells" spec.
            parts = value.split('-')
            for idx, attr_name in enumerate(target):
                config.__setattr__(attr_name, int(parts[idx]))
    return config
| joelongLin/Shared_SSM | gluonts/lzl_shared_ssm/utils/tool_func.py | tool_func.py | py | 14,028 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.tseries.offsets.YearOffset",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "pandas.tseries.offsets",
"line_number": 50,
"usage_type": "name"
},
{
"api_n... |
8158101065 | """Parse the YAML configuration."""
import logging
from pathlib import Path
from typing import Optional, Dict, Callable, Tuple, Union
import yaml
import torch
from . import utils
logger = logging.getLogger("casanovo")
class Config:
    """The Casanovo configuration options.

    If a parameter is missing from a user's configuration file, the default
    value is assumed.

    Parameters
    ----------
    config_file : str, optional
        The provided user configuration file.

    Examples
    --------
    ```
    config = Config("casanovo.yaml")
    config.n_peaks  # the n_peaks parameter
    config["n_peaks"]  # also the n_peaks parameter
    ```
    """

    # Packaged default configuration shipped next to this module.
    _default_config = Path(__file__).parent / "config.yaml"
    # Expected type (or converter callable) for every known option.
    _config_types = dict(
        random_seed=int,
        n_peaks=int,
        min_mz=float,
        max_mz=float,
        min_intensity=float,
        remove_precursor_tol=float,
        max_charge=int,
        precursor_mass_tol=float,
        isotope_error_range=lambda min_max: (int(min_max[0]), int(min_max[1])),
        min_peptide_len=int,
        dim_model=int,
        n_head=int,
        dim_feedforward=int,
        n_layers=int,
        dropout=float,
        dim_intensity=int,
        max_length=int,
        residues=dict,
        n_log=int,
        tb_summarywriter=str,
        warmup_iters=int,
        max_iters=int,
        learning_rate=float,
        weight_decay=float,
        train_batch_size=int,
        predict_batch_size=int,
        n_beams=int,
        top_match=int,
        max_epochs=int,
        num_sanity_val_steps=int,
        train_from_scratch=bool,
        save_model=bool,
        model_save_folder_path=str,
        save_weights_only=bool,
        every_n_train_steps=int,
        no_gpu=bool,
    )

    def __init__(self, config_file: Optional[str] = None):
        """Initialize a Config object."""
        self.file = str(config_file) if config_file is not None else "default"
        # Defaults always come from the packaged config.yaml.
        with self._default_config.open() as f_in:
            self._params = yaml.safe_load(f_in)

        if config_file is None:
            self._user_config = {}
        else:
            with Path(config_file).open() as f_in:
                self._user_config = yaml.safe_load(f_in)

        # Validate (and type-convert) every known parameter:
        for key, val in self._config_types.items():
            self.validate_param(key, val)

        # Add extra configuration options and scale by the number of GPUs.
        n_gpus = 0 if self["no_gpu"] else torch.cuda.device_count()
        self._params["n_workers"] = utils.n_workers()
        if n_gpus > 1:
            self._params["train_batch_size"] = (
                self["train_batch_size"] // n_gpus
            )

    def __getitem__(self, param: str) -> Union[int, bool, str, Tuple, Dict]:
        """Retrieve a parameter"""
        return self._params[param]

    def __getattr__(self, param: str) -> Union[int, bool, str, Tuple, Dict]:
        """Retrieve a parameter as an attribute.

        Raises
        ------
        AttributeError
            For unknown parameters. (The previous implementation raised
            KeyError here, which broke ``hasattr``, ``copy`` and pickling,
            and could recurse infinitely before ``_params`` was assigned.)
        """
        params = self.__dict__.get("_params")
        if params is not None and param in params:
            return params[param]
        raise AttributeError(
            f"'{type(self).__name__}' object has no attribute '{param}'"
        )

    def validate_param(self, param: str, param_type: Callable):
        """Verify a parameter is the correct type.

        Parameters
        ----------
        param : str
            The Casanovo parameter
        param_type : Callable
            The expected callable type of the parameter.
        """
        try:
            # User-supplied value wins over the default.
            param_val = self._user_config.get(param, self._params[param])
            if param == "residues":
                # Residue masses are normalized to {str: float}.
                residues = {
                    str(aa): float(mass) for aa, mass in param_val.items()
                }
                self._params["residues"] = residues
            elif param_val is not None:
                self._params[param] = param_type(param_val)
        except (TypeError, ValueError) as err:
            logger.error(
                "Incorrect type for configuration value %s: %s", param, err
            )
            raise TypeError(
                f"Incorrect type for configuration value {param}: {err}"
            )

    def items(self) -> Tuple[str, ...]:
        """Return the parameters"""
        return self._params.items()
| Noble-Lab/casanovo | casanovo/config.py | config.py | py | 4,091 | python | en | code | 75 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "yaml.safe_load",
"... |
17260671113 | from scipy import stats
import numpy as np
import csv
sentiment_scores = np.random.random(52)
def calculateRegressions(sentiment_scores):
    """Regress each demographic column against the state sentiment scores.

    Parameters
    ----------
    sentiment_scores : array-like of per-state sentiment scores; its length
        must match the number of data rows in USdemographics_by_state.csv.

    Returns
    -------
    tuple of 3 tuples of strings:
        (criterion names, r values, verbal correlation strengths).
    """
    white_ratios = []
    male_ratios = []
    educations = []
    populations = []
    incomes = []
    criteria = ("white ratio", "male ratio", "education level",
                "population", "median household income")
    r_values = []
    extents_of_correlation = []
    # (column label, destination list, CSV column index)
    demographics = [("white ratio", white_ratios, 1),
                    ("male ratio", male_ratios, 2),
                    ("education level", educations, 6),
                    ("population", populations, 7),
                    ("median household income", incomes, 8)]
    # The 'with' block closes the file automatically; the explicit
    # file.close() that used to follow the loops was redundant.
    with open('USdemographics_by_state.csv', 'r') as file:
        reader = csv.reader(file)
        # skip header
        next(reader)
        for row in reader:
            for (_, data, col) in demographics:
                # Strip thousands separators and dollar signs.
                num = row[col].replace(",", "", 10)
                num = num.replace("$", "")
                data.append(float(num))
    # Exclusive upper bound on |r| -> verbal strength.
    correlation = [[0.3, "not"],
                   [0.5, "weakly"],
                   [0.7, "moderately"],
                   [1.0, "strongly"]]
    for (colname, data, _) in demographics:
        slope, intercept, r_value, p_value, std_err = stats.linregress(
            sentiment_scores, data)
        r_values.append(str(r_value))
        abs_r = abs(r_value)
        for (bound, extent) in correlation:
            if abs_r < bound:
                extents_of_correlation.append(extent + " correlated")
                break
        else:
            # |r| == 1.0 previously fell through every strict '<' test,
            # leaving the extents list short; a perfect fit is "strongly".
            extents_of_correlation.append("strongly correlated")
    # output is a tuple of 3 tuples of strings
    return (criteria, tuple(r_values), tuple(extents_of_correlation))
print(calculateRegressions(sentiment_scores)) | yongzheng9405/-TwitterSentimentAnalysisProject | scripts/regression.py | regression.py | py | 1,673 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.random",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "scipy.stats.linregress... |
8473334100 | """To run this: $ python3 parse.py test.xml
The script will pase a XML file and print its node tags.
Compatible with Python 3; changing the print statements should make this
compatible with Python 2.
"""
import sys
# http://docs.python.org/library/xml.etree.elementtree.html
from xml.etree import ElementTree
def operation(node):
    """Sample visitor: print the tag of *node* (implicitly returns None)."""
    print(node.tag)
def recur_node(node, f):
    """Apply function f on the given node and, recursively (pre-order),
    on all of its descendants.

    Keyword arguments:
    node - the root node (an Element); may be None
    f - function to be applied on node and its children

    Returns 0 when *node* is None (matching the original contract),
    otherwise None.
    """
    if node is None:
        return 0
    f(node)
    # Element.getchildren() was removed in Python 3.9; iterating the
    # element directly yields its children on every Python 3 version.
    for child in node:
        recur_node(child, f)
def main(fileName):
    """Parse the XML file and print each node tag via recur_node.

    Returns -1 if the file cannot be opened or parsed, otherwise the
    result of recur_node on the document root.
    """
    try:
        root = ElementTree.parse(fileName).getroot()
    except (OSError, ElementTree.ParseError):
        # Narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit.
        return -1
    return recur_node(root, operation)
if __name__ == "__main__":
    # CLI usage: python parse.py file.xml
    sys.exit(main(sys.argv[1]))
###
###
| babywyrm/sysadmin | pyth3/xml/print_node_tags_.py | print_node_tags_.py | py | 1,104 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sys.argv",... |
1243606709 | import pygame as pg
from numpy import zeros
from constants import consts as c
from id_mapping import id_map, reverse_id_map
class World:
    """Tile grid holding ore deposits, rendered relative to the player.

    The grid is a (num_cells x num_cells) int array; 0 means empty and
    positive values are ore ids from id_map.
    """

    def __init__(self):
        # 0 = empty tile; nonzero entries index into id_map/reverse_id_map.
        self.grid = zeros((c.num_cells, c.num_cells), dtype=int)
        self.ore_locations = []
        # Hard-coded starter deposits (row slices, column slices).
        self.grid[10:13, 10:12] = id_map["coal"]
        self.grid[15:17, 15:17] = id_map["iron_ore"]
        self.grid[15:17, 20:23] = id_map["copper_ore"]
        self.populate_ore_locations()

    def populate_ore_locations(self):
        # Cache every non-empty cell so render() can skip empty tiles.
        for row in range(c.num_cells):
            for col in range(c.num_cells):
                if self.grid[row, col] > 0:
                    self.ore_locations.append((row, col))

    def render(self):
        # Draw each ore tile in screen space (world coords minus camera offset).
        for loc in self.ore_locations:
            row, col = loc
            x = col * c.cell_length - c.player_x
            y = row * c.cell_length - c.player_y
            pg.draw.rect(c.screen, c.ore_colors[self.grid[loc]], (x, y, c.cell_length, c.cell_length))

    def render_tooltip(self, row, col):
        # Show the ore name of cell (row, col) in a box next to the cursor.
        x, y = pg.mouse.get_pos()
        # "iron_ore" -> "Iron Ore" for display.
        ore = reverse_id_map[self.grid[row, col]].replace("_", " ").title()
        ore_text = c.merriweather.render(ore, True, pg.Color("white"))
        pg.draw.rect(c.screen, pg.Color("black"), (x + 10, y + 10, ore_text.get_width() + 20, ore_text.get_height() + 20))
        c.screen.blit(ore_text, (x + 20, y + 20))


# Module-level singleton used by the rest of the game.
world = World()
{
"api_name": "numpy.zeros",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "constants.consts.num_cells",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "constants.consts",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "id_mappi... |
42147526124 | import copy
import logging
import traceback
from typing import Any, Dict, Optional, List
from .base_trigger import TriggerEvent, BaseTrigger
from .playbook_utils import merge_global_params
from .playbooks_event_handler import PlaybooksEventHandler
from ..model.events import ExecutionBaseEvent
from ..reporting.base import Finding
from ...model.playbook_action import PlaybookAction
from ...model.config import Registry
from .trigger import Trigger
class PlaybooksEventHandlerImpl(PlaybooksEventHandler):
    """Default playbooks event handler.

    Matches incoming trigger events against registered playbooks, runs
    the matching playbooks' actions, and publishes any findings they
    produce to the configured sinks.
    """

    def __init__(self, registry: Registry):
        # Registry gives access to playbooks, actions, sinks and the scheduler.
        self.registry = registry

    def handle_trigger(self, trigger_event: TriggerEvent) -> Optional[Dict[str, Any]]:
        """Run every registered playbook whose trigger fires for this event.

        Returns the response of the last playbook that produced one, or
        None when no playbook matched or responded.
        """
        playbooks = self.registry.get_playbooks().get_playbooks(trigger_event)
        if not playbooks:  # no registered playbooks for this event type
            return

        execution_response = None
        execution_event: Optional[ExecutionBaseEvent] = None
        # Findings dict is shared across playbooks so they can accumulate.
        findings: Dict[str, Finding] = {}
        for playbook in playbooks:
            fired_trigger = self.__get_fired_trigger(trigger_event, playbook.triggers)
            if fired_trigger:
                execution_event = fired_trigger.build_execution_event(
                    trigger_event, findings
                )
                if execution_event:  # might not exist for unsupported k8s types
                    # Playbook-specific sinks override the defaults.
                    execution_event.named_sinks = (
                        playbook.sinks
                        if playbook.sinks is not None
                        else self.registry.get_playbooks().get_default_sinks()
                    )
                    playbook_resp = self.__run_playbook_actions(
                        execution_event,
                        playbook.get_actions(),
                    )
                    if (
                        playbook_resp
                    ):  # For now, only last response applies. (For simplicity reasons)
                        execution_response = playbook_resp
                    if playbook.stop or execution_event.stop_processing:
                        break

        if execution_event:
            self.__handle_findings(execution_event)

        return execution_response

    def run_actions(
        self,
        execution_event: ExecutionBaseEvent,
        actions: List[PlaybookAction],
    ) -> Optional[Dict[str, Any]]:
        """Run *actions* on an already-built execution event, then publish
        whatever findings were produced."""
        if execution_event.named_sinks is None:
            # Fall back to the globally configured default sinks.
            execution_event.named_sinks = (
                self.registry.get_playbooks().get_default_sinks()
            )
        execution_response = self.__run_playbook_actions(
            execution_event,
            actions,
        )
        self.__handle_findings(execution_event)
        return execution_response

    def __prepare_execution_event(self, execution_event: ExecutionBaseEvent):
        # Give actions access to the shared scheduler.
        execution_event.set_scheduler(self.registry.get_scheduler())

    def run_external_action(
        self,
        action_name: str,
        action_params: Optional[dict],
        sinks: Optional[List[str]],
    ) -> Optional[Dict[str, Any]]:
        """Run a single action triggered externally (e.g. a manual call).

        The action must be registered and declare a from_params factory so
        an execution event can be built from the raw parameters.
        """
        action_def = self.registry.get_actions().get_action(action_name)
        if not action_def:
            return self.__error_resp(f"External action not found {action_name}")
        if not action_def.from_params_func:
            return self.__error_resp(
                f"Action {action_name} cannot run using external event"
            )
        if sinks:
            # Explicit sinks are injected into the action parameters.
            if action_params:
                action_params["named_sinks"] = sinks
            else:
                action_params = {"named_sinks": sinks}
        try:
            instantiation_params = action_def.from_params_parameter_class(
                **action_params
            )
        except Exception:
            return self.__error_resp(
                f"Failed to create execution instance for"
                f" {action_name} {action_def.from_params_parameter_class}"
                f" {action_params} {traceback.format_exc()}"
            )
        execution_event = action_def.from_params_func(instantiation_params)
        if not execution_event:
            return self.__error_resp(
                f"Failed to create execution event for "
                f"{action_name} {action_params}"
            )
        playbook_action = PlaybookAction(
            action_name=action_name, action_params=action_params
        )
        return self.run_actions(execution_event, [playbook_action])

    @classmethod
    def __error_resp(cls, msg: str) -> dict:
        # Log and wrap an error message in the standard response shape.
        logging.error(msg)
        return {"success": False, "msg": msg}

    def __run_playbook_actions(
        self,
        execution_event: ExecutionBaseEvent,
        actions: List[PlaybookAction],
    ) -> Dict[str, Any]:
        """Run each action in order; a failing action records an error
        response but does not stop later actions (unless the event asks
        to stop processing)."""
        self.__prepare_execution_event(execution_event)
        execution_event.response = {"success": True}
        for action in actions:
            if execution_event.stop_processing:
                return execution_event.response
            registered_action = self.registry.get_actions().get_action(
                action.action_name
            )
            if (
                not registered_action
            ):  # Might happen if manually trying to trigger incorrect action
                msg = f"action {action.action_name} not found. Skipping for event {type(execution_event)}"
                execution_event.response = self.__error_resp(msg)
                continue
            if not isinstance(execution_event, registered_action.event_type):
                msg = f"Action {action.action_name} requires {registered_action.event_type}"
                execution_event.response = self.__error_resp(msg)
                continue

            if not registered_action.params_type:
                # Parameterless action.
                registered_action.func(execution_event)
            else:
                action_params = None
                try:
                    # Global config values fill in missing action params.
                    action_params = merge_global_params(
                        self.get_global_config(), action.action_params
                    )
                    params = registered_action.params_type(**action_params)
                except Exception:
                    msg = (
                        f"Failed to create {registered_action.params_type} "
                        f"using {action_params} for running {action.action_name} "
                        f"exc={traceback.format_exc()}"
                    )
                    execution_event.response = self.__error_resp(msg)
                    continue
                registered_action.func(execution_event, params)
        return execution_event.response

    @classmethod
    def __get_fired_trigger(
        cls, trigger_event: TriggerEvent, playbook_triggers: List[Trigger]
    ) -> Optional[BaseTrigger]:
        """Return the first trigger of the playbook that should fire for
        *trigger_event*, or None."""
        for trigger in playbook_triggers:
            if trigger.get().should_fire(trigger_event):
                return trigger.get()
        return None

    def __handle_findings(self, execution_event: ExecutionBaseEvent):
        """Publish every finding on the event to each of its named sinks."""
        for finding in execution_event.findings.values():
            for sink_name in execution_event.named_sinks:
                try:
                    sink = self.registry.get_sinks().sinks.get(sink_name)
                    if not sink:
                        logging.error(
                            f"sink {sink_name} not found. Skipping event finding {finding}"
                        )
                        continue
                    # create deep copy, so that iterating on one sink won't affect the others
                    finding_copy = copy.deepcopy(finding)
                    sink.write_finding(finding_copy, self.registry.get_sinks().platform_enabled)
                except Exception:  # Failure to send to one sink shouldn't fail all
                    logging.error(
                        f"Failed to publish finding to sink {sink_name}", exc_info=True
                    )

    def get_global_config(self) -> dict:
        """Return the global playbook configuration dict."""
        return self.registry.get_playbooks().get_global_config()
| m8e/robusta | src/robusta/core/playbooks/playbooks_event_handler_impl.py | playbooks_event_handler_impl.py | py | 8,001 | python | en | code | null | github-code | 36 | [
{
"api_name": "playbooks_event_handler.PlaybooksEventHandler",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "model.config.Registry",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "base_trigger.TriggerEvent",
"line_number": 20,
"usage_type": "name"
... |
25458093560 | """Analyze word-level LIWC scores between lucid and non-lucid dreams.
***Note this does not re-run LIWC, but loads in prior results.
IMPORTS
=======
- original post info, dreamviews-posts.tsv
- word-level LIWC scores, validate-liwc_wordscores.tsv
- LIWC dictionary, dictionaries/custom.dic
EXPORTS
=======
- effect sizes (d) for top words from each category, validate-liwc_wordscores-stats.tsv
Some of this is copy/pasted from the general liwc_stats script,
but this is way messier so better alone.
1. only a subset
2. need to load in numpy stuff
3. need to look up vocabs in whole lexicon for subsetting
4. don't need all the summary statistic details
5. don't want to run both stats on this, too out of hand just pick one
6. export is different, here it's the top N words of a few categories and their effect sizes
"""
import liwc
import numpy as np
import pandas as pd
import pingouin as pg
from scipy import sparse
import tqdm
import config as c
################################################################################
# SETUP
################################################################################

LIWC_CATEGORIES = ["insight", "agency"]
TOP_N = 20  # Top n contributing tokens/words for each category.

# NOTE: these paths were originally defined as import_path_* but then
# referenced below as import_fname_* (NameError at runtime); names are
# now used consistently.  The constant was likewise defined as top_n but
# used as TOP_N.
import_path_dict = c.DATA_DIR / "dictionaries" / "custom.dic"
import_path_data = c.DATA_DIR / "derivatives" / "validate-liwc_wordscores-data.npz"
import_path_attr = c.DATA_DIR / "derivatives" / "validate-liwc_wordscores-attr.npz"
export_path = c.DATA_DIR / "derivatives" / "validate-liwc_wordscores-stats.tsv"

#### load in the original posts file to get attributes lucidity and user_id
# and drop un-labeled posts.
# merge the clean data file and all its attributes with the liwc results
posts = c.load_dreamviews_posts()
posts = posts.set_index("post_id")[["user_id", "lucidity"]]
# keeps both "lucid" and "nonlucid" labels, drops everything else
posts = posts[posts["lucidity"].str.contains("lucid")]

#### load in dictionary lexicon
# and flip key/value from token/category to category/word_list
# and only for the liwc categories of interest
# (comprehension variable renamed from 'c' to avoid shadowing the config alias)
lexicon, _ = liwc.read_dic(import_path_dict)
wordlists = {cat: [t for t, cats in lexicon.items() if cat in cats]
             for cat in LIWC_CATEGORIES}
# and grab a subset of all the vocab so to reduce unnecessary memory later
vocab = set(t for wlist in wordlists.values() for t in wlist)

#### load in numpy arrays of token scores and generate dataframe
sparse_matr_attributes = np.load(import_path_attr, allow_pickle=True)
tokens = sparse_matr_attributes["token"]
token_index = [t in vocab for t in tokens]
relevant_tokens = tokens[token_index]
sparse_matr = sparse.load_npz(import_path_data)
scores = pd.DataFrame(sparse_matr[:, token_index].toarray(),
                      columns=relevant_tokens,
                      index=sparse_matr_attributes["post_id"])

# merge the post attributes with LIWC token scores
df = posts.join(scores, how="left")
assert len(df) == len(posts)


################################################################################
# STATISTICAL TESTS
################################################################################

# Average the LD and NLD scores of each token for each user.
# Some users might not have both dream types and they'll be removed.
avgs = df.groupby(["user_id", "lucidity"]
    ).mean().rename_axis(columns="token"
    ).pivot_table(index="user_id", columns="lucidity"
    ).dropna()

# We already have only relevant tokens, so get effect
# sizes for all of them (paired Cohen's d with bootstrapped CI).
effectsize_results = []
for tok in tqdm.tqdm(relevant_tokens, desc="stats on word-level LIWC scores"):
    ld, nld = avgs[tok][["lucid", "nonlucid"]].T.values
    stats = {}
    stats["cohen-d"] = pg.compute_effsize(ld, nld, paired=True, eftype="cohen")
    stats["cohen-d_lo"], stats["cohen-d_hi"] = pg.compute_bootci(ld, nld,
        paired=True, func="cohen", method="cper",
        confidence=.95, n_boot=2000, decimals=4)
    effectsize_results.append(pd.DataFrame(stats, index=[tok]))
es_df = pd.concat(effectsize_results).rename_axis("token")

# Each LIWC category will utilize a different set of tokens/words.
# Find the top N contributors to the overall effect.
token_rank_results = []
for cat in LIWC_CATEGORIES:
    # generate an index of relevant tokens for this category
    cat_index = es_df.index.map(lambda t: t in wordlists[cat])
    # extract only rows in this category
    df_ = es_df.loc[cat_index]
    # sort by absolute effect size
    df_ = df_.sort_values("cohen-d", ascending=False, key=abs)
    # take top rows and clean up a bit
    # NOTE(review): assumes each category has at least TOP_N tokens;
    # otherwise the rank assignment below raises a length mismatch.
    df_ = df_[:TOP_N]
    df_[f"{cat}_rank"] = np.arange(TOP_N) + 1
    token_rank_results.append(df_)
out = pd.concat(token_rank_results)

# Export.
out.to_csv(export_path, float_format="%.4f", index=True, na_rep="NA", sep="\t", encoding="utf-8")
| remrama/dreamviews | validate-liwc_word_stats.py | validate-liwc_word_stats.py | py | 5,290 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "config.DATA_DIR",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "config.DATA_DIR",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "config.DATA_DIR",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "config... |
29344589576 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
#Created by 'quanpower' on '15-4-4'
#Email:quanpower@gmail.com
#QQ:252527676
#Site:www.smartlinkcloud.com
__author__ = 'quanpower'
import numpy as np
import cv2
# Open the source video (OpenCV 2.x `cv2.cv` constant API).
videoCapture = cv2.VideoCapture('../files/video_test.rmvb')
# Read the frame rate and frame size from the source stream.
fps = videoCapture.get(cv2.cv.CV_CAP_PROP_FPS)
size = (int(videoCapture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
        int(videoCapture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
# Choose the output codec/container: I420 -> avi, MJPG -> mp4.
videoWriter = cv2.VideoWriter('oto_other.mp4', cv2.cv.CV_FOURCC('M', 'J', 'P', 'G'), fps, size)
# Read the first frame.
success, frame = videoCapture.read()
while success:
    cv2.imshow('Oto Video', frame)        # show the current frame
    # NOTE(review): 1000/int(fps) is a float on Python 3 (cv2.waitKey needs
    # an int); this script targets Python 2, where this is integer division.
    cv2.waitKey(1000/int(fps))            # wait roughly one frame interval
    videoWriter.write(frame)              # append the frame to the output file
    success, frame = videoCapture.read()  # fetch the next frame
    if cv2.waitKey(1)&0xFF == ord('q'):   # press 'q' to quit early
        break
videoCapture.release()
cv2.destroyAllWindows()
| SmartHomeRobot/OpenCV | src/cv_video_play_rmvb.py | cv_video_play_rmvb.py | py | 982 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.cv",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "cv2.cv",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "cv2.cv",
"line_number": ... |
74224406183 | import datetime
import logging
import numpy as np
import os
import skimage.io as io
from sklearn.cluster import KMeans
from skimage import img_as_float
from skimage.color import rgb2lab
from skimage.exposure import equalize_adapthist
from skimage.filters import gaussian
from skimage.transform import resize
from src.data_loader import sample_names, images, root_dir, FOLDER_EXPERIMENTS
from src.utils import apply_on_normalized_luminance, colormap, outline_regions, average_color
MAX_PATIENTS = 1             # how many patients to process
MAX_IMAGES_PER_PATIENT = 1   # how many images per patient
MAX_PATCHES_PER_IMAGE = 2    # NOTE(review): not used in this excerpt — confirm
RESIZE_IMAGES = None  # (300, 300) # None to deactivate

if __name__ == "__main__":
    # One timestamped results folder per run.
    execution_id = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    results_dir = root_dir(FOLDER_EXPERIMENTS(version=3), execution_id)
    os.makedirs(results_dir, exist_ok=True)
    # Log both to a file inside the results folder and to the console.
    logging.basicConfig(
        level=logging.INFO,
        handlers=[
            logging.FileHandler(os.path.join(results_dir, 'log.txt')),
            logging.StreamHandler()
        ]
    )
    p_names = sample_names()
    for idx_p, p_name in enumerate(p_names[0:MAX_PATIENTS]):
        for idx_img, (path_image, image) in enumerate(images(patient_name=p_name, max_images=MAX_IMAGES_PER_PATIENT)):
            results_p_dir = os.path.join(results_dir, p_name, str(idx_img))
            os.makedirs(results_p_dir, exist_ok=True)
            logging.info(f'Processing: {p_name}-{idx_img}')
            # 01 Preprocessing
            ###
            if RESIZE_IMAGES:
                logging.info('Resizing image')
                image = resize(img_as_float(image), RESIZE_IMAGES + (3, ))
            io.imsave(fname=os.path.join(results_p_dir, '01 01 Original.jpg'),
                      arr=image)
            logging.info('Gaussian filter')
            # Smooth only the (normalized) luminance channel before CLAHE.
            image = apply_on_normalized_luminance(
                operation=gaussian,
                image_rgb=image)
            io.imsave(fname=os.path.join(results_p_dir, '01 02 Gaussian filter.jpg'),
                      arr=image)
            logging.info('CLAHE')
            # Contrast-limited adaptive histogram equalisation on luminance.
            image = apply_on_normalized_luminance(
                lambda img: equalize_adapthist(img, clip_limit=0.02),
                image_rgb=image)
            io.imsave(fname=os.path.join(results_p_dir, '01 03 CLAHE.jpg'),
                      arr=image)
            # 02 Lab visualization
            ###
            # Min-max normalise each Lab channel to [0, 1] before saving.
            image_lab = rgb2lab(image)
            luminance = image_lab[:, :, 0]
            a = np.min(luminance)
            b = np.max(luminance - a)
            luminance = (luminance - a) / b
            io.imsave(fname=os.path.join(results_p_dir, '02 01 Luminance.png'),
                      arr=luminance)
            channel_a = image_lab[:, :, 1]
            a = np.min(channel_a)
            b = np.max(channel_a - a)
            channel_a = (channel_a - a) / b
            io.imsave(fname=os.path.join(results_p_dir, '02 01 channel_a.png'),
                      arr=channel_a)
            channel_b = image_lab[:, :, 2]
            a = np.min(channel_b)
            b = np.max(channel_b - a)
            channel_b = (channel_b - a) / b
            io.imsave(fname=os.path.join(results_p_dir, '02 01 channel_b.png'),
                      arr=channel_b)
            # 03-04 Lab separation
            ###
            logging.info('Positive separation (k-means clustering on `a channel`)')
            channel_a = image_lab[:, :, 1]
            clustering = KMeans(n_clusters=2, random_state=0).fit(channel_a.reshape(-1, 1))
            # The cluster with the higher `a` centre is taken as positive.
            idx_positive_cluster = np.argmax(clustering.cluster_centers_)
            positive_mask = np.equal(clustering.labels_, idx_positive_cluster).reshape(channel_a.shape[0:2])
            io.imsave(fname=os.path.join(results_p_dir, f'03 K-means - Positives - Mask.jpg'),
                      arr=colormap(positive_mask))
            io.imsave(fname=os.path.join(results_p_dir, f'03 K-means - Positives - regions.jpg'),
                      arr=outline_regions(image=image, region_labels=positive_mask))
            io.imsave(fname=os.path.join(results_p_dir, f'03 K-means - Positives - average color.jpg'),
                      arr=average_color(image=image, region_labels=positive_mask))
            logging.info('Negative separation (k-means clustering on `b channel`)')
            # Cluster only the non-positive pixels on the `b` channel; the
            # cluster with the lower `b` centre is taken as negative.
            channel_b = image_lab[:, :, 2]
            nonpositive_mask = np.logical_not(positive_mask)
            clustering = KMeans(n_clusters=2, random_state=0).fit(channel_b[nonpositive_mask].reshape(-1, 1))
            idx_negative_cluster = np.argmin(clustering.cluster_centers_)
            negative_mask_wrt_nonpositive = np.equal(clustering.labels_, idx_negative_cluster)
            # Scatter the flat per-nonpositive-pixel labels back into a
            # full-size boolean mask.
            negative_mask = np.full(shape=positive_mask.shape, dtype=bool, fill_value=False)
            nonpositive_mask_as_flatten_idcs = np.where(nonpositive_mask.flatten())
            negative_mask_wrt_nonpositive_flatten = nonpositive_mask_as_flatten_idcs[0][negative_mask_wrt_nonpositive]
            negative_mask = negative_mask.flatten()
            negative_mask[negative_mask_wrt_nonpositive_flatten] = True
            negative_mask = negative_mask.reshape(positive_mask.shape)
            io.imsave(fname=os.path.join(results_p_dir, f'04 K-means - Negatives - Mask.jpg'),
                      arr=colormap(negative_mask))
            io.imsave(fname=os.path.join(results_p_dir, f'04 K-means - Negatives - regions.jpg'),
                      arr=outline_regions(image=image, region_labels=negative_mask))
            io.imsave(fname=os.path.join(results_p_dir, f'04 K-means - Negatives - average color.jpg'),
                      arr=average_color(image=image, region_labels=negative_mask))
            # 05 Results visualization
            ###
            # Combined label image: 0 = other, 1 = negative, 2 = positive.
            region_labels = negative_mask + 2*positive_mask
            io.imsave(fname=os.path.join(results_p_dir, '05 Results - labels.jpg'),
                      arr=colormap(region_labels))
            io.imsave(fname=os.path.join(results_p_dir, '05 Results - regions.jpg'),
                      arr=outline_regions(image=image, region_labels=region_labels))
            io.imsave(fname=os.path.join(results_p_dir, '05 Results - average_color.jpg'),
                      arr=average_color(image=image, region_labels=region_labels))
| AntoineRouland/ki67 | src/v3_Lab_separation/run_v3.py | run_v3.py | py | 6,283 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "src.data_loader.root_dir",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": ... |
43145801264 | # -*- encoding: utf-8 -*-
import os
import time
import pyautogui as auto
import PyHook3 as pyhook
import pythoncom
from PIL import Image
from pytesseract import image_to_string
from googletrans import Translator
import asyncio
import tkinter as tk
# 引入字体模块
import tkinter.font as tkFont
from concurrent.futures import ThreadPoolExecutor
# Shared translator instance (uses the China-accessible Google endpoint).
translator = Translator(service_urls=['translate.google.cn'])

# Selection state: (px, py) is the previous double-click corner, (x, y) the
# latest one.  -1000 is the "unset" sentinel (see getClickPosition below).
px = -1000
py = -1000
x = -1000
y = -1000
last = 0  # timestamp of the previous mouse-up, for double-click detection

# Borderless, always-on-top, mostly-transparent overlay in the bottom-right
# corner of the screen, holding one label per translation direction.
root = tk.Tk("Advise")
root.overrideredirect(True)
sw = root.winfo_screenwidth()
sh = root.winfo_screenheight()
root_x = sw - 200
root_y = sh - 120
root.geometry("200x50+%d+%d" % (root_x, root_y))
root.wm_attributes('-topmost', 1)
root.wm_attributes('-alpha', 0.15)
font = tkFont.Font(family="Microsoft YaHei", size=9);
english = tk.Label(root, text="English", font=font)
english.pack()
chinese = tk.Label(root, text="Chinese", font=font)
chinese.pack()
async def translateZh(text):
    """Translate *text* from Chinese to English and show it in the English label."""
    global translator, english
    text = text.replace("\n", " ")  # collapse OCR line breaks into spaces
    english['text'] = translator.translate(text, src="zh-cn", dest="en").text
    await asyncio.sleep(0.5)
async def translateEn(text):
    """Translate *text* from English to Chinese and show it in the Chinese label."""
    global translator
    text = text.replace("\n", " ")  # collapse OCR line breaks into spaces
    chinese['text'] = translator.translate(text, src="en", dest="zh-cn").text
    await asyncio.sleep(0.5)
async def translate(text):
    """Run both translation directions, reveal the overlay, then fade it out."""
    global root
    task1 = asyncio.create_task(translateEn(text))
    task2 = asyncio.create_task(translateZh(text))
    await task1
    await task2
    root.wm_attributes('-alpha', 0.4)  # make the overlay readable
    root.update()
    await hide(5)  # auto-hide again after 5 seconds
async def hide(delay):
    """Make the overlay fully transparent after *delay* seconds."""
    global root
    await asyncio.sleep(delay)
    root.wm_attributes('-alpha', 0)
    root.update()
def getClickPosition(_x, _y):
    """Record one double-click corner; on the second one OCR the rectangle.

    Two consecutive double-clicks define opposite corners of a screen
    rectangle; the region is screenshotted, OCR'd (chi_sim+eng) and the
    recognised text is handed to translate().
    """
    global x, y, px, py
    px = x; py = y;
    x = _x; y = _y;
    l = min(x, px)
    t = min(y, py)
    w = abs(px - x)
    h = abs(py - y)
    # The -1000 sentinel keeps w/h >= 1000 for a lone first corner, so OCR
    # only runs once both corners are real and the selection is small enough.
    if w < 1000 and h < 1000:
        auto.screenshot("temp.png", region=(l, t, w, h))
        img = Image.open("temp.png")
        img = img.convert("L")  # grayscale before OCR
        text = image_to_string(img, "chi_sim+eng")
        if text:
            asyncio.run(translate(text))
        # Reset the anchors so the next double-click starts a new selection.
        x = -1000
        y = -1000
def onMouseEvent(event):
    """PyHook mouse callback: turns two quick left-button-ups into a double click.

    Returning True lets the event propagate to other applications.
    """
    try:
        global last, loop  # NOTE(review): `loop` is never defined or used — leftover?
        if event.Message == 514:  # 514 == WM_LBUTTONUP
            # Judge Double Click: two mouse-ups within 300 ms
            if event.Time - last < 300:
                getClickPosition(event.Position[0], event.Position[1])
            last = event.Time
        return True
    except KeyboardInterrupt:
        exit(0)
def main():
    """Install the global mouse hook and pump Windows messages forever."""
    hm = pyhook.HookManager()
    hm.MouseAll = onMouseEvent
    hm.HookMouse()
    pythoncom.PumpMessages()


if __name__ == "__main__":
    main()
| minskiter/capword | index.py | index.py | py | 2,564 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "googletrans.Translator",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tkinter.font.Font",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "tkinter.font",
... |
72909209064 | # Объедините функции из прошлых задач.
# Функцию угадайку задекорируйте:
# ○ декораторами для сохранения параметров,
# ○ декоратором контроля значений и
# ○ декоратором для многократного запуска.
# Выберите верный порядок декораторов.
from typing import Callable
from random import randint
import os
import json
def check_parametr(func: Callable):
    """Decorator that sanitises the first two positional arguments.

    If ``num`` falls outside [1, 100] or ``count`` outside [1, 10], the
    offending value is replaced by a random value drawn from that range
    before the wrapped function is called.
    """
    num_bounds = (1, 100)
    count_bounds = (1, 10)

    def guarded(num: int, count: int, *args, **kwargs):
        if not num_bounds[0] <= num <= num_bounds[1]:
            num = randint(*num_bounds)
        if not count_bounds[0] <= count <= count_bounds[1]:
            count = randint(*count_bounds)
        return func(num, count, *args, **kwargs)

    return guarded
def logger(func: Callable):
    """Decorator that records every call (args, kwargs, result) in a JSON file.

    The file is named ``<function name>.json``; existing history is loaded
    once at decoration time and rewritten in full after each call.
    """
    log_path = f'{func.__name__}.json'
    if os.path.exists(log_path):
        with open(log_path, 'r', encoding='utf-8') as src:
            history = json.load(src)
    else:
        history = []

    def recorded(*args, **kwargs):
        entry = {'args': args, **kwargs}
        entry['result'] = func(*args, **kwargs)
        history.append(entry)
        with open(log_path, 'w', encoding='utf-8') as dst:
            json.dump(history, dst)
        return entry['result']

    return recorded
def count_f(num: int = 1):
    """Decorator factory: run the wrapped function ``num`` times per call.

    The decorated function returns a list with the result of each run.

    Bug fix: the results list used to live in the decorator's closure, so
    it was shared across calls — a second call returned the accumulated
    results of all previous calls as well.  The list is now built fresh on
    every invocation.
    """
    def deco(func: Callable):
        def wrapper(*args, **kwargs):
            # Fresh list per invocation (was a shared closure variable).
            return [func(*args, **kwargs) for _ in range(num)]
        return wrapper
    return deco
@count_f(2)      # play the whole game twice, collecting both outcomes
@check_parametr  # out-of-range num/count are replaced with random valid values
@logger          # every call is appended to gess_number.json
def gess_number(num: int, count: int) -> str:
    """Interactive game: the player has *count* tries to guess *num* (1-100).

    Returns the Russian win/lose message.  (The original annotation said
    ``Callable[[], None]``, but the function actually returns ``str``.)
    """
    for i in range(1, count + 1):
        num_input = int(input(f'Угадайте число от 1 до 100, у Вас {count + 1 - i} попыток: '))
        print(f'Попытка № {i}')
        if num_input == num:
            return 'Вы угадали!'
        elif num_input < num:
            print('Загаданное число больше')
        else:
            print('Загаданное число меньше')
    # All attempts used up without a correct guess.
    return 'Вы не угадали!'


if __name__ == '__main__':
    game = gess_number(25, 5)
    print(game)
| TatSoz/Python_GB | Sem_9/task_05.py | task_05.py | py | 2,456 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "typing.Callable",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"... |
27102703139 | import os
import re
import logging
import json
from datetime import datetime, timedelta
from pathlib import Path
import requests
import spotipy
from spotipy import SpotifyOAuth
from spotipy.oauth2 import SpotifyClientCredentials
from dagster_cron import SystemCronScheduler
from dagster import (
RepositoryDefinition,
ScheduleDefinition,
pipeline,
schedules,
solid,
)
from env import FEEDLY_REFRESH_TOKEN, SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET, SPOTIPY_REDIRECT_URI
class FeedlyApiRequestError(Exception):
    """Raised when the Feedly API answers with a non-200 status code."""
    pass
class MusicFeeder:
    """Sync fresh tracks from a Feedly "Music" feed into a Spotify playlist.

    Flow (see execute): read feed entry titles -> search Spotify for each
    title -> keep first hits released within the last week -> add the ones
    not already present to the target playlist.
    """

    # OAuth scopes requested for the Spotify token.
    scopes = " ".join([
        "ugc-image-upload",
        "user-read-playback-state",
        "user-modify-playback-state",
        "user-read-currently-playing",
        "streaming",
        "app-remote-control",
        "user-read-email",
        "user-read-private",
        "playlist-read-collaborative",
        "playlist-modify-public",
        "playlist-read-private",
        "playlist-modify-private",
        "user-library-modify",
        "user-library-read",
        "user-top-read",
        "user-read-recently-played",
        "user-follow-read",
        "user-follow-modify"
    ])
    feedly_username = '5066bdab-2d04-4094-b7ac-cd19047daffe'
    spotify_username = "21bpmv7tcs4av5sakwfxkof2a"
    spotify_playlist_id = "6qsodHkxMB36VhWfIwvylQ"
    # Tokens stripped from feed titles before building the search query.
    stop_words = ["MV"]

    def __init__(self):
        # feedly setup
        self.feedly_access_token = self._get_feedly_access_token()
        # spotify setup: the OAuth token cache lives next to this file
        self.cache_path = Path(__file__).parent / f".cache-{self.spotify_username}"
        self.sp_oauth = SpotifyOAuth(
            SPOTIPY_CLIENT_ID,
            SPOTIPY_CLIENT_SECRET,
            SPOTIPY_REDIRECT_URI,
            scope=self.scopes,
            cache_path=self.cache_path,
            username=self.spotify_username
        )
        self.token_info = self.sp_oauth.get_cached_token()
        self.token = self.token_info["access_token"]
        self.spotify = spotipy.Spotify(auth=self.token)
        self.titles, self.tracks, self.current_tracks, self.tracks_to_add = None, None, None, None

    def execute(self):
        """Run one full Feedly -> Spotify sync cycle."""
        self.titles = self._get_titles()
        # extract latest 30 titles
        self.titles = self.titles[:30]
        self.tracks = self._get_taget_tracks(self.titles)
        self.current_tracks = self._get_current_tracks()
        self.tracks_to_add = self._get_tracks_to_add(self.tracks, self.current_tracks)
        self._add_to_spotify(self.tracks_to_add)

    def _get_feedly_access_token(self):
        """Exchange the long-lived refresh token for a Feedly access token."""
        url = "https://cloud.feedly.com/v3/auth/token"
        payload = dict(
            refresh_token=FEEDLY_REFRESH_TOKEN,
            client_id="feedlydev",
            client_secret="feedlydev",
            grant_type="refresh_token"
        )
        resp = requests.post(url, json=payload).json()
        return resp['access_token']

    def _request_to_feedly(self, url, params={}):
        """GET *url* with the OAuth header; raise FeedlyApiRequestError on non-200."""
        # NOTE(review): mutable default `params={}` — harmless here since it
        # is never mutated, but a None default would be safer.
        headers = {'Authorization': f"OAuth {self.feedly_access_token}"}
        resp = requests.get(url, headers=headers, params=params)
        if resp.status_code != 200:
            raise FeedlyApiRequestError(json.dumps(resp.json()))
        return resp.json()

    def _get_titles(self):
        """Return up to 100 non-empty entry titles from the Music category feed."""
        url = f"https://cloud.feedly.com/v3/streams/contents?streamId=user/{self.feedly_username}/category/Music"
        params = dict(unreadOnly=False, count=100)
        resp = self._request_to_feedly(url, params)
        return [item['title'] for item in resp['items'] if item['title']]

    def _get_taget_tracks(self, titles):
        # [sic] "taget" — typo kept because callers use this name.
        """Search Spotify for each title; keep first hits released within 7 days."""
        one_week_ago = datetime.today() - timedelta(days=7)
        tracks = []
        for title in titles:
            # Keep only ASCII word runs, drop empties and stop words ("MV").
            terms = re.findall("[a-zA-Z ]+", title)
            terms = [term.strip() for term in terms if term.strip()]
            terms = [term for term in terms if term not in self.stop_words]
            query = " ".join(terms)
            logging.info(f"searching: {query}")
            result = self.spotify.search(query)['tracks']['items']
            if result:
                track = result[0]
                try:
                    release_date = strptime_helper = datetime.strptime(track['album']['release_date'], "%Y-%m-%d")
                except ValueError:
                    # release_date may not be a full Y-m-d date; skip those.
                    continue
                # Don't append if the release date of the track is older than one week ago
                if release_date < one_week_ago:
                    continue
                tracks.append(track['id'])
        return tracks

    def _get_current_tracks(self):
        """Return the track items currently in the target playlist."""
        return self.spotify.user_playlist(self.spotify_username, self.spotify_playlist_id)['tracks']['items']

    def _get_tracks_to_add(self, tracks, current_tracks):
        """Drop ids that are already present in the playlist."""
        current_track_ids = [c_track['track']['id'] for c_track in current_tracks]
        tracks = [track for track in tracks if track not in current_track_ids]
        return tracks

    def _add_to_spotify(self, tracks):
        """Prepend *tracks* to the playlist; no-op for an empty list."""
        if not tracks:
            return
        self.spotify.user_playlist_add_tracks(self.spotify_username, self.spotify_playlist_id, tracks, position=0)
@solid
def load_to_spotify(context):
    """Dagster solid: run one Feedly -> Spotify sync."""
    MusicFeeder().execute()
@pipeline
def load_to_spotify_pipeline():
    """Single-solid Dagster pipeline wrapping load_to_spotify."""
    load_to_spotify()
def etl_spotify_repository():
    """Dagster repository exposing the sync pipeline."""
    return RepositoryDefinition(
        'etl_spotify_repository', pipeline_defs=[load_to_spotify_pipeline]
    )
@schedules(SystemCronScheduler)
def spotify_schedules():
    """Cron schedule definitions for the sync pipeline.

    NOTE(review): the name says "daily" but '0 * * * *' fires at minute 0 of
    every hour — confirm which cadence is intended.
    """
    return [
        ScheduleDefinition(
            name='daily_spotify_batch',
            cron_schedule='0 * * * *',
            pipeline_name='load_to_spotify_pipeline',
            environment_dict={},
        )
    ]
if __name__ == '__main__':
    # Manual run: log to stderr/stdout and do one sync immediately.
    logging.basicConfig(level=logging.INFO)
    MusicFeeder().execute()
# EtlFeedlyspotify(context=context).execute()
# result = execute_pipeline(load_to_spotify_pipeline)
# assert result.success
# def test_hello_cereal_solid():
# res = execute_solid(hello_cereal)
# assert res.success
# assert len(res.output_value()) == 77
# def test_hello_cereal_pipeline():
# res = execute_pipeline(hello_cereal_pipeline)
# assert res.success
# assert len(res.result_for_solid('hello_cereal').output_value()) == 77
| ikedaosushi/ikedaosushi-dagster | spotify.py | spotify.py | py | 6,273 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "spotipy.SpotifyOAuth",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "env.SPOTIPY_CLIENT_ID",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "env.SPOTI... |
5371067321 | from splinter import Browser
from bs4 import BeautifulSoup
import time
import pandas as pd
import pymongo
import requests
def init_browser():
    """Launch a visible Chrome browser driven by the local chromedriver."""
    executable_path = {"executable_path": "chromedriver.exe"}
    return Browser("chrome", **executable_path, headless=False)
def scrape():
    """Scrape Mars news, featured image, weather, facts and hemisphere images.

    Returns:
        dict with keys Mars_news_title, Mars_news_text, Mars_featured_img,
        Mars_weather, Mars_facts (an HTML table string) and Mars_hemispheres
        (list of {"Title", "Image_URL"} dicts).

    Bug fix: ``mars_df_table.replace("\\n", "")`` discarded its result
    (str.replace returns a new string); the newline stripping is now
    actually applied.
    """
    browser = init_browser()

    # NASA Mars News: latest headline and its teaser paragraph.
    url = "https://mars.nasa.gov/news/"
    browser.visit(url)
    html = browser.html
    soup = BeautifulSoup(html, "html.parser")
    latest_news_title = soup.find("div", class_="bottom_gradient").find("h3").text
    p_text = soup.find("div", class_="article_teaser_body").text

    # JPL Mars Space Images: full URL of the featured image.
    img_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
    browser.visit(img_url)
    time.sleep(5)  # let the carousel render before parsing
    img_html = browser.html
    i_soup = BeautifulSoup(img_html, "html.parser")
    # Extract the image path from the inline CSS background-image style.
    img_path = i_soup.find("article", class_="carousel_item")\
        ["style"].replace("background-image: url(", "").replace(");", "")[1:-1]
    mars_url = "https://www.jpl.nasa.gov"
    featured_image_url = mars_url + img_path

    # Mars Weather: text of the latest @MarsWxReport tweet.
    w_url = "https://twitter.com/marswxreport?lang=en"
    browser.visit(w_url)
    time.sleep(5)
    html = browser.html
    soup = BeautifulSoup(html, "html.parser")
    mars_weather = (soup.find('div', attrs={"data-testid": "tweet"}).get_text()).split('InSight ')[1]

    # Mars Facts: first HTML table on the page, re-rendered as HTML.
    f_url = "https://space-facts.com/mars/"
    browser.visit(f_url)
    time.sleep(5)
    m_facts = pd.read_html(f_url)
    mars_df = m_facts[0]
    mars_facts = mars_df.rename(columns={0: "Aspect", 1: "Detail"}).set_index("Aspect")
    mars_df_table = mars_facts.to_html()
    # str.replace returns a new string — assign it (the old code dropped it).
    mars_df_table = mars_df_table.replace("\n", "")

    # Mars Hemispheres: title and full-resolution image URL for each one.
    h_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
    browser.visit(h_url)
    time.sleep(5)
    h_html = browser.html
    soup = BeautifulSoup(h_html, "html.parser")
    h_images = soup.find("div", class_="collapsible results")
    hems = h_images.find_all("a")
    hems_list = []
    for hemisphere in hems:
        if hemisphere.h3:  # only anchors that carry a title
            title = hemisphere.h3.text
            link = hemisphere["href"]
            home_url = "https://astrogeology.usgs.gov/"
            next_url = home_url + link
            browser.visit(next_url)
            html = browser.html
            soup = BeautifulSoup(html, 'html.parser')
            hemisphere2 = soup.find("div", class_="downloads")
            img = hemisphere2.ul.a["href"]
            hemisphere_dict = {}
            hemisphere_dict["Title"] = title
            hemisphere_dict["Image_URL"] = img
            hems_list.append(hemisphere_dict)
            browser.back()

    Mars_info = {
        "Mars_news_title": latest_news_title,
        "Mars_news_text": p_text,
        "Mars_featured_img": featured_image_url,
        "Mars_weather": mars_weather,
        "Mars_facts": mars_df_table,
        "Mars_hemispheres": hems_list
    }
    browser.quit()
    return Mars_info
# if __name__ == "__main__":
# data = scrape()
# print(data) | omeza3547/web-scraping-challenge | Mission_to_Mars/scrape_mars.py | scrape_mars.py | py | 3,060 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "splinter.Browser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
... |
7775693352 | from torch.utils.data import Dataset
from torchvision import transforms
import numpy as np
import pandas as pd
import random
from src.dataset import get_load_data
import torch
from tqdm import tqdm
def generate_train_set(train):
    """Build one (anchor, positive, negative) triplet per training sample.

    Args:
        train: sequence of (sample, label) pairs.

    Returns:
        List of dicts with keys "anchor", "positive", "negative" and
        "anchor_label".  The positive shares the anchor's label but is a
        different sample; the negative has a different label.

    Raises:
        ValueError: if some label occurs only once, so no positive exists
            (the original implementation would retry forever in that case).
    """
    labels = np.array([label for _, label in train])
    # Precompute the candidate index pools per label once.  The original
    # called np.argwhere twice per sample, making generation O(n^2).
    uniq = np.unique(labels)
    same_by_label = {lab: np.flatnonzero(labels == lab) for lab in uniq}
    other_by_label = {lab: np.flatnonzero(labels != lab) for lab in uniq}
    train_set = []
    for i in tqdm(range(len(train))):
        anchor, anchor_label = train[i][0], train[i][1]
        # Positive: same label, but never the anchor itself.
        positives = same_by_label[anchor_label]
        positives = positives[positives != i]
        if positives.size == 0:
            raise ValueError(f"label {anchor_label!r} needs at least two samples")
        positive_ind = int(random.choice(positives))
        # Negative: any sample with a different label.
        negative_ind = int(random.choice(other_by_label[anchor_label]))
        train_set.append({"anchor": anchor,
                          "positive": train[positive_ind][0],
                          "negative": train[negative_ind][0],
                          "anchor_label": anchor_label})
    return train_set
class TripletLossDataset(Dataset):
    """Torch dataset yielding triplets in train mode and (sample, label) pairs otherwise.

    In train mode, either a precomputed ``train_set`` (as produced by
    generate_train_set) is used, or one is generated from get_load_data.
    """

    def __init__(self, train_set=None, train=True, root="data", dataset="FashionMNIST"):
        self.is_train = train
        if self.is_train:
            if train_set is None:
                # NOTE: rebinds the boolean `train` parameter with the loaded
                # training split — confusing naming, but intentional here.
                train, _ = get_load_data(root=root, dataset=dataset)
                self.train_set = generate_train_set(train)
            else:
                self.train_set = train_set
        else:
            _, self.test = get_load_data(root=root, dataset=dataset)

    def __len__(self):
        # Size of the active split.
        if self.is_train:
            return len(self.train_set)
        else:
            return len(self.test)

    def __getitem__(self, item):
        if self.is_train:
            data = self.train_set[item]
            return data['anchor'], data['positive'], data['negative'], data['anchor_label']
        else:
            return self.test[item][0], self.test[item][1]
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number... |
73036997545 | from data_handler import DataHandler
from scanner import Scanner
from datetime import datetime
from queue import Queue
from threading import Thread
import sys
def printStats(devices_count, sensor_type):
    """Print a timestamped status line, e.g. ``14:03:22 | 7   bt devices found.``.

    Args:
        devices_count: number of devices found in the last scan.
        sensor_type: short sensor tag ("bt"/"wifi"), right-aligned to 4 chars.

    The original mixed ``str.format`` and ``%`` interpolation in one template
    ("%s | %d {:>4} ..."), which is hard to read and would break if
    sensor_type ever contained a '%'.  This single f-string emits identical
    output.
    """
    time = datetime.now().strftime("%H:%M:%S")
    print(f"{time} | {devices_count} {sensor_type:>4} devices found.")
def queueScans(scanner):
    """Run the Bluetooth and Wi-Fi scans on parallel threads; return both counts.

    NOTE(review): results are pulled from the queue in completion order, not
    submission order, while the caller assumes [bt, wifi] — confirm/fix the
    ordering contract.
    """
    jobs = Queue()
    threads_list = list()
    t1 = Thread(target=lambda q: q.put(
        scanner.count_bt_devices()), args=(jobs,))
    t1.daemon = True  # Kill thread on exit
    t1.start()
    threads_list.append(t1)
    t2 = Thread(target=lambda q: q.put(
        scanner.count_wifi_devices()), args=(jobs,))
    t2.daemon = True  # Kill thread on exit
    t2.start()
    threads_list.append(t2)
    # Join threads (wait until both scans have finished)
    for t in threads_list:
        t.join()
        # t.keepRunning = False
    scan_results = []
    # Get return values from threads
    while not jobs.empty():
        result = jobs.get()
        scan_results.append(result)
    return scan_results
def main():
    """Scan forever: count devices, print stats and persist both counts."""
    scanner = Scanner()
    data_handler = DataHandler()
    while True:
        try:
            # NOTE(review): assumes queueScans returns [bt, wifi] in that
            # order, which its queue does not guarantee — see queueScans.
            data = queueScans(scanner)
            bt_devices_count = data[0]
            wifi_devices_count = data[1]
            sensor_type = "bt"
            printStats(bt_devices_count, sensor_type)
            # data_handler.send_data(bt_devices_count, sensor_type)
            data_handler.save_data(bt_devices_count, sensor_type)
            sensor_type = "wifi"
            printStats(wifi_devices_count, sensor_type)
            # data_handler.send_data(wifi_devices_count, sensor_type)
            data_handler.save_data(wifi_devices_count, sensor_type)
        except (KeyboardInterrupt, SystemExit):
            sys.exit()


if __name__ == "__main__":
    main()
| AlexNaga/rpi-people-counter | scanner/main.py | main.py | py | 1,832 | python | en | code | 25 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "queue.Queue",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "threading.Thread"... |
22911051083 | # %%
from typing import Dict, List
from kfp import dsl
@dsl.component(base_image="zhuwq0/quakeflow:latest")
def cut_templates(
root_path: str,
region: str,
config: Dict,
index: int = 0,
model_path: str = "../PhaseNet/",
mseed_list: List = None,
protocol: str = "file",
bucket: str = "",
token: Dict = None,
) -> str:
import json
import multiprocessing as mp
import os
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from glob import glob
from multiprocessing.pool import ThreadPool
from pathlib import Path
import gamma
import numpy as np
import obspy
import pandas as pd
from tqdm import tqdm
def extract_template_numpy(
template_fname,
traveltime_fname,
traveltime_index_fname,
traveltime_type_fname,
arrivaltime_index_fname,
snr_fname,
mseed_path,
events,
stations,
picks,
config,
lock,
ibar,
):
template_array = np.memmap(template_fname, dtype=np.float32, mode="r+", shape=tuple(config["template_shape"]))
traveltime_array = np.memmap(
traveltime_fname, dtype=np.float32, mode="r+", shape=tuple(config["traveltime_shape"])
)
traveltime_index_array = np.memmap(
traveltime_index_fname, dtype=np.int32, mode="r+", shape=tuple(config["traveltime_shape"])
)
traveltime_type_array = np.memmap(
traveltime_type_fname, dtype=np.int32, mode="r+", shape=tuple(config["traveltime_shape"])
)
arrivaltime_index_array = np.memmap(
arrivaltime_index_fname, dtype=np.int64, mode="r+", shape=tuple(config["traveltime_shape"])
)
snr_array = np.memmap(snr_fname, dtype=np.float32, mode="r+", shape=tuple(config["snr_shape"]))
# %%
tmp = mseed_path.split("/")
year_jday, hour = tmp[-2], tmp[-1]
begin_time = datetime.strptime(f"{year_jday}T{hour}", "%Y-%jT%H").replace(tzinfo=timezone.utc)
end_time = begin_time + timedelta(hours=1)
events_ = events[(events["event_time"] > begin_time) & (events["event_time"] < end_time)]
if len(events_) == 0:
return 0
# %%
waveforms_dict = {}
for station_id in tqdm(
stations["station_id"], desc=f"Loading waveform: ", position=ibar % 6, nrows=7, mininterval=5, leave=True
):
for c in config["components"]:
if os.path.exists(f"{mseed_path}/{station_id}{c}.mseed"):
try:
stream = obspy.read(f"{mseed_path}/{station_id}{c}.mseed")
stream.merge(fill_value="latest")
if len(stream) > 1:
print(f"More than one trace: {stream}")
trace = stream[0]
if trace.stats.sampling_rate != config["sampling_rate"]:
if trace.stats.sampling_rate % config["sampling_rate"] == 0:
trace.decimate(int(trace.stats.sampling_rate / config["sampling_rate"]))
else:
trace.resample(config["sampling_rate"])
# trace.detrend("linear")
# trace.taper(max_percentage=0.05, type="cosine")
# trace.filter("bandpass", freqmin=1.0, freqmax=15.0, corners=2, zerophase=True)
waveforms_dict[f"{station_id}{c}"] = trace
except Exception as e:
print(e)
continue
# %%
picks["station_component_index"] = picks.apply(lambda x: f"{x.station_id}.{x.phase_type}", axis=1)
# %%
num_event = 0
for ii, event_index in tqdm(
enumerate(events_["event_index"]),
total=len(events_),
desc=f"Cutting event {year_jday}T{hour}",
position=ibar % 6,
nrows=7,
mininterval=5,
leave=True,
):
if event_index not in picks.index:
continue
picks_ = picks.loc[[event_index]]
picks_ = picks_.set_index("station_component_index")
event_loc = events_.loc[event_index][["x_km", "y_km", "z_km"]].to_numpy().astype(np.float32)
event_loc = np.hstack((event_loc, [0]))[np.newaxis, :]
station_loc = stations[["x_km", "y_km", "z_km"]].to_numpy()
template_ = np.zeros((6, len(stations), config["nt"]), dtype=np.float32)
snr_ = np.zeros((6, len(stations)), dtype=np.float32)
traveltime_ = np.zeros((2, len(stations)), dtype=np.float32)
traveltime_type_ = np.zeros((2, len(stations)), dtype=np.int32)
arrivaltime_index_ = np.zeros((2, len(stations)), dtype=np.int64)
for i, phase_type in enumerate(["P", "S"]):
traveltime = gamma.seismic_ops.calc_time(
event_loc,
station_loc,
[phase_type.lower() for _ in range(len(station_loc))],
vel={"p": 6.0, "s": 6.0 / 1.73},
).squeeze()
phase_timestamp_pred = events_.loc[event_index]["event_timestamp"] + traveltime
# predicted_phase_time = [events_.loc[event_index]["event_time"] + pd.Timedelta(seconds=x) for x in traveltime]
mean_shift = []
for j, station_id in enumerate(stations["station_id"]):
if f"{station_id}.{phase_type}" in picks_.index:
## TODO: check if multiple phases for the same station
phase_timestamp = picks_.loc[f"{station_id}.{phase_type}"]["phase_timestamp"]
phase_timestamp_pred[j] = phase_timestamp
mean_shift.append(
phase_timestamp - (events_.loc[event_index]["event_timestamp"] + traveltime[j])
)
traveltime[j] = phase_timestamp - events_.loc[event_index]["event_timestamp"]
traveltime_type_[i, j] = 1
arrivaltime_index_[i, j] = int(round(phase_timestamp * config["sampling_rate"]))
# arrivaltime_index_[i, j] = phase_timestamp
else:
traveltime_type_[i, j] = 0
# if len(mean_shift) > 0:
# mean_shift = float(np.median(mean_shift))
# else:
# mean_shift = 0
# phase_timestamp_pred[traveltime_type_[i, :] == 0] += mean_shift
# traveltime[traveltime_type_[i, :] == 0] += mean_shift
traveltime_[i, :] = traveltime
for j, station in enumerate(stations.iloc):
station_id = station["station_id"]
empty_data = True
for c in station["component"]:
c_index = i * 3 + config["component_mapping"][c]
if f"{station_id}{c}" in waveforms_dict:
trace = waveforms_dict[f"{station_id}{c}"]
begin_time = (
phase_timestamp_pred[j]
- trace.stats.starttime.datetime.replace(tzinfo=timezone.utc).timestamp()
- config["time_before"]
)
end_time = (
phase_timestamp_pred[j]
- trace.stats.starttime.datetime.replace(tzinfo=timezone.utc).timestamp()
+ config["time_after"]
)
trace_data = trace.data[
max(0, int(begin_time * trace.stats.sampling_rate)) : max(
0, int(end_time * trace.stats.sampling_rate)
)
].astype(np.float32)
if len(trace_data) < config["nt"]:
continue
std = np.std(trace_data)
if std == 0:
continue
empty_data = False
template_[c_index, j, : config["nt"]] = trace_data[: config["nt"]]
s = np.std(trace_data[-int(config["time_after"] * config["sampling_rate"]) :])
n = np.std(trace_data[: int(config["time_before"] * config["sampling_rate"])])
if n == 0:
snr_[c_index, j] = 0
else:
snr_[c_index, j] = s / n
# template_array[event_index] = template_
# traveltime_array[event_index] = traveltime_
# traveltime_index_array[event_index] = np.round(traveltime_ * config["sampling_rate"]).astype(np.int32)
# traveltime_type_array[event_index] = traveltime_type_
# arrivaltime_index_array[event_index] = arrivaltime_index_
# snr_array[event_index] = snr_
template_array[ii] = template_
traveltime_array[ii] = traveltime_
traveltime_index_array[ii] = np.round(traveltime_ * config["sampling_rate"]).astype(np.int32)
traveltime_type_array[ii] = traveltime_type_
arrivaltime_index_array[ii] = arrivaltime_index_
snr_array[ii] = snr_
with lock:
template_array.flush()
traveltime_array.flush()
traveltime_index_array.flush()
traveltime_type_array.flush()
arrivaltime_index_array.flush()
snr_array.flush()
# num_event += 1
# if num_event > 20:
# break
# %%
# Output directory for all CCTorch template artifacts.
result_path = f"{region}/cctorch"
if not os.path.exists(f"{root_path}/{result_path}"):
    os.makedirs(f"{root_path}/{result_path}")
# %%
# Load station metadata and project lon/lat/elevation to local Cartesian km
# around (longitude0, latitude0).
stations = pd.read_json(f"{root_path}/{region}/obspy/stations.json", orient="index")
stations["station_id"] = stations.index
# stations = stations[
# (stations["longitude"] >= config.xlim_degree[0])
# & (stations["longitude"] =< config.xlim_degree[1])
# & (stations["latitude"] >= config.ylim_degree[0])
# & (stations["latitude"] <= config.ylim_degree[1])
# ]
# stations["distance_km"] = stations.apply(
# lambda x: math.sqrt((x.latitude - config.latitude0) ** 2 + (x.longitude - config.longitude0) ** 2)
# * config.degree2km,
# axis=1,
# )
# stations.sort_values(by="distance_km", inplace=True)
# stations.drop(columns=["distance_km"], inplace=True)
# stations.sort_values(by="latitude", inplace=True)
# Flat-earth projection: degrees -> km; z_km is positive downward (depth).
stations["x_km"] = stations.apply(
    lambda x: (x.longitude - config["longitude0"]) * np.cos(np.deg2rad(config["latitude0"])) * config["degree2km"],
    axis=1,
)
stations["y_km"] = stations.apply(lambda x: (x.latitude - config["latitude0"]) * config["degree2km"], axis=1)
stations["z_km"] = stations.apply(lambda x: -x["elevation_m"] / 1e3, axis=1)
# %%
# Load GaMMA-associated events, drop rows without an origin time, sort
# chronologically, and project epicenters to the same local km frame.
events = pd.read_csv(f"{root_path}/{region}/gamma/gamma_events.csv", parse_dates=["time"])
events = events[events["time"].notna()]
events.sort_values(by="time", inplace=True)
events.rename(columns={"time": "event_time"}, inplace=True)
events["event_time"] = events["event_time"].apply(lambda x: pd.to_datetime(x, utc=True))
events["event_timestamp"] = events["event_time"].apply(lambda x: x.timestamp())
events["x_km"] = events.apply(
    lambda x: (x.longitude - config["longitude0"]) * np.cos(np.deg2rad(config["latitude0"])) * config["degree2km"],
    axis=1,
)
events["y_km"] = events.apply(lambda x: (x.latitude - config["latitude0"]) * config["degree2km"], axis=1)
events["z_km"] = events.apply(lambda x: x.depth_km, axis=1)
# %%
# Persist the mapping "template row -> original event_index" so CCTorch can
# translate array rows back to catalog events.
if "event_index" not in events.columns:
    event_index = events.index
else:
    event_index = list(events["event_index"])
event_index_fname = f"{root_path}/{result_path}/event_index.txt"
with open(event_index_fname, "w") as f:
    for i, idx in enumerate(event_index):
        f.write(f"{i},{idx}\n")
config["cctorch"]["event_index_file"] = event_index_fname
# %%
# Load phase picks and keep only those associated to an event
# (event_index == -1 marks unassociated picks).
picks = pd.read_csv(
    f"{root_path}/{region}/gamma/gamma_picks.csv",
    parse_dates=["phase_time"],
)
picks = picks[picks["event_index"] != -1]
picks["phase_timestamp"] = picks["phase_time"].apply(lambda x: x.timestamp())
picks_ = picks.groupby("station_id").size()
# station_id_ = picks_[picks_ > (picks_.sum() / len(picks_) * 0.1)].index
# stations = stations[stations["station_id"].isin(station_id_)]
# Keep only stations that actually contributed picks, and record their order.
stations = stations[stations["station_id"].isin(picks_.index)]
stations.to_json(f"{root_path}/{result_path}/stations_filtered.json", orient="index", indent=4)
stations.to_csv(f"{root_path}/{result_path}/stations_filtered.csv", index=True, index_label="station_id")
station_index_fname = f"{root_path}/{result_path}/station_index.txt"
with open(station_index_fname, "w") as f:
    for i, sta in enumerate(stations.iloc):
        f.write(f"{i},{sta['station_id']},{sta['component']}\n")
config["cctorch"]["station_index_file"] = station_index_fname
# %%
# Attach station and event attributes to each pick (suffixes disambiguate
# the coordinate columns both frames carry).
picks = picks.merge(stations, on="station_id")
picks = picks.merge(events, on="event_index", suffixes=("_station", "_event"))
# %%
# Re-index both frames by event_index for fast per-event lookup in workers.
events["index"] = events["event_index"]
events.set_index("index", inplace=True)
picks["index"] = picks["event_index"]
picks.set_index("index", inplace=True)
# %%
# Template dimensions: nt samples per window; 6 channels = [P,S] x [E,N,Z].
nt = int((config["cctorch"]["time_before"] + config["cctorch"]["time_after"]) * config["cctorch"]["sampling_rate"])
config["cctorch"]["nt"] = nt
nch = 6 ## For [P,S] phases and [E,N,Z] components
# nev = int(events.index.max()) + 1
nev = len(events)
nst = len(stations)
print(f"nev: {nev}, nch: {nch}, nst: {nst}, nt: {nt}")
template_shape = (nev, nch, nst, nt)
traveltime_shape = (nev, nch // 3, nst)
snr_shape = (nev, nch, nst)
config["cctorch"]["template_shape"] = template_shape
config["cctorch"]["traveltime_shape"] = traveltime_shape
config["cctorch"]["snr_shape"] = snr_shape
# Memory-mapped output files; their paths are also recorded in the config
# that is dumped below for the downstream CCTorch run.
template_fname = f"{root_path}/{result_path}/template.dat"
traveltime_fname = f"{root_path}/{result_path}/traveltime.dat"
traveltime_index_fname = f"{root_path}/{result_path}/traveltime_index.dat"
traveltime_type_fname = f"{root_path}/{result_path}/traveltime_type.dat"
arrivaltime_index_fname = f"{root_path}/{result_path}/arrivaltime_index.dat"
snr_fname = f"{root_path}/{result_path}/snr.dat"
config["cctorch"]["template_file"] = template_fname
config["cctorch"]["traveltime_file"] = traveltime_fname
config["cctorch"]["traveltime_index_file"] = traveltime_index_fname
config["cctorch"]["traveltime_type_file"] = traveltime_type_fname
config["cctorch"]["arrivaltime_index_file"] = arrivaltime_index_fname
config["cctorch"]["snr_file"] = snr_fname
# mode="w+" creates/overwrites the backing files at the full final size.
template_array = np.memmap(template_fname, dtype=np.float32, mode="w+", shape=template_shape)
traveltime_array = np.memmap(traveltime_fname, dtype=np.float32, mode="w+", shape=traveltime_shape)
traveltime_index_array = np.memmap(traveltime_index_fname, dtype=np.int32, mode="w+", shape=traveltime_shape)
traveltime_type_array = np.memmap(traveltime_type_fname, dtype=np.int32, mode="w+", shape=traveltime_shape)
arrivaltime_index_array = np.memmap(arrivaltime_index_fname, dtype=np.int64, mode="w+", shape=traveltime_shape)
snr_array = np.memmap(snr_fname, dtype=np.float32, mode="w+", shape=snr_shape)
with open(f"{root_path}/{result_path}/config.json", "w") as f:
    json.dump(config["cctorch"], f, indent=4, sort_keys=True)
# %%
# Fan template extraction out over the per-day/hour waveform directories.
# A ThreadPool (not a process pool) is used — presumably so the memmapped
# arrays and the lock are shared without pickling; confirm against
# extract_template_numpy's signature.
dirs = sorted(glob(f"{root_path}/{region}/waveforms/????-???/??"))
ncpu = mp.cpu_count()
lock = mp.Lock()
# with mp.get_context("spawn").Pool(ncpu) as pool:
with ThreadPool(ncpu) as pool:
    pool.starmap(
        extract_template_numpy,
        [
            (
                template_fname,
                traveltime_fname,
                traveltime_index_fname,
                traveltime_type_fname,
                arrivaltime_index_fname,
                snr_fname,
                d,
                events,
                stations,
                picks,
                config["cctorch"],
                lock,
                i,
            )
            for i, d in enumerate(dirs)
        ],
    )
if __name__ == "__main__":
    # Standalone entry point: run the template-cutting step on the local
    # demo region without going through the pipeline orchestrator.
    import json
    import os

    root_path, region = "local", "demo"
    config_path = f"{root_path}/{region}/config.json"
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    # Invoke the plain function behind the component decorator.
    cut_templates.python_func(root_path, region=region, config=config)
| YuancongGou/QuakeFlow | slurm_MON/cut_templates.py | cut_templates.py | py | 17,298 | python | en | code | null | github-code | 36 | [
{
"api_name": "typing.Dict",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "numpy.memmap",
"line_number":... |
20528028503 | import os
import re
import xlrd
import xlwt
from xlutils.copy import copy
class HandleExcel:
    """Merge per-device .xls test-report files into one workbook and build a
    de-duplicated "cleaned" workbook containing only passing barcodes.

    Built on the legacy ``xlrd``/``xlwt``/``xlutils`` stack, so only the old
    binary .xls format is supported (not .xlsx).
    """

    # NOTE(review): class-level duplicates of the attributes set in __init__;
    # kept for backward compatibility with any access via the class itself.
    file_name = '测试数据汇总.xls'
    title = ['条码内容', '故障通道', '测试时间']

    def __init__(self):
        self.root = ''
        self.dir = ''
        self.files = ''
        self.allFile_url = ''
        # Output workbook names ("cleaned test data" / "merged test data").
        self.final_file_name = '测试数据清洗版.xls'
        # Column headers: barcode content, faulty channel, test time.
        self.title = ['条码内容', '故障通道', '测试时间']
        self.file_name = '测试数据汇总.xls'

    def get_allfile_msg(self, file_dir):
        """Return (root, dirs, files) for the top level of ``file_dir``.

        Only the first tuple yielded by os.walk is returned: the directory
        itself with its immediate sub-directories and files.
        """
        for root, dirs, files in os.walk(file_dir):
            return root, dirs, files

    def get_allfile_url(self, root, files):
        """Join ``root`` with each file name using a Windows-style separator."""
        return [root + '\\' + f for f in files]

    def all_to_one(self, root, allFile_url, files, file_name='allExcel.xls', title=None):
        """Concatenate the data rows of every workbook in ``allFile_url`` into
        one new workbook under ``root``, appending the digits extracted from
        the source file name (e.g. an embedded timestamp) as an extra column.
        """
        if title is None:
            # Robustness: the original crashed on len(None) when the default
            # title was used; fall back to the standard headers instead.
            title = self.title
        file_name = root + '\\' + file_name
        self.create_excel(file_name, title)
        list_row_data = []
        # BUGFIX: the original zipped each sheet's row numbers against the
        # *list of file names*, pairing rows with unrelated files and silently
        # truncating sheets longer than len(files). Pair each sheet with its
        # own source name instead.
        for file_url, source_name in zip(allFile_url, files):
            sheet = xlrd.open_workbook(file_url).sheet_by_index(0)
            digits = re.sub(r"\D", "", source_name)
            for row_idx in range(1, sheet.nrows):  # row 0 is the header
                row = sheet.row_values(row_idx)
                row.append(digits)
                list_row_data.append(row)
        self.add_row(list_row_data, file_name)

    def create_excel(self, file_name, title):
        """Create a new single-sheet workbook containing only a header row."""
        workbook = xlwt.Workbook()
        sheet = workbook.add_sheet('sheet1', cell_overwrite_ok=True)
        for col, header in enumerate(title):
            sheet.write(0, col, header)
        workbook.save(file_name)

    def add_row(self, list_row_data, file_name):
        """Append ``list_row_data`` below the existing rows of ``file_name``.

        xlwt cannot edit a file in place, so the workbook is re-read with
        xlrd, cloned via xlutils.copy, extended, and saved back.
        """
        book_read = xlrd.open_workbook(file_name)
        sheet_read = book_read.sheet_by_index(0)
        book_write = copy(book_read)
        sheet_write = book_write.get_sheet(0)
        for offset, row_data in enumerate(list_row_data):
            for col, value in enumerate(row_data):
                sheet_write.write(sheet_read.nrows + offset, col, value)
        book_write.save(file_name)  # overwrite the original file

    def clear_excel(self, file_dir, file_name):
        """Write the "cleaned" workbook: the first occurrence of each barcode
        that passed the test and whose code starts with 'A'.

        Reads ``file_dir\\file_name`` and writes ``file_dir\\final_file_name``.
        """
        source = xlrd.open_workbook(file_dir + '\\' + file_name)
        sheet = source.sheets()[0]
        clear_code_list = []
        clear_result_list = []
        clear_time_list = []
        for row_idx in range(1, sheet.nrows):  # skip the header row
            code = sheet.cell(row_idx, 0).value
            result = sheet.cell(row_idx, 1).value
            test_time = sheet.cell(row_idx, 2).value
            # BUGFIX: the original condition lacked parentheses, so operator
            # precedence parsed it as
            #   (not-duplicate and result == "'PASS'") or
            #   (result == 'PASS' and code.startswith('A'))
            # which let duplicate 'PASS' rows through and applied the
            # startswith('A') filter inconsistently.
            if code not in clear_code_list and result in ("'PASS'", 'PASS') and code.startswith('A'):
                clear_code_list.append(code)
                clear_result_list.append(result)
                clear_time_list.append(test_time)
        output = xlwt.Workbook()
        out_sheet = output.add_sheet('sheet1', cell_overwrite_ok=True)
        # Header row, then one row per kept (code, result, time) triple.
        for col, header in enumerate(self.title):
            out_sheet.write(0, col, header)
        rows = zip(clear_code_list, clear_result_list, clear_time_list)
        for row_idx, (code, result, test_time) in enumerate(rows, start=1):
            out_sheet.write(row_idx, 0, code)
            out_sheet.write(row_idx, 1, result)
            out_sheet.write(row_idx, 2, test_time)
        output.save(file_dir + '\\' + self.final_file_name)
| chenyaqiao0505/Code111 | HandleExcel/excel_method.py | excel_method.py | py | 3,916 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.walk",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "xlrd.open_workbook",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "xlwt.Workbook",
"line_number": ... |
33552308938 | #!/usr/bin/env python3
import os
import logging
import argparse
import csv
def process_result(result_folder):
    """Scan a SQLancer output folder and summarize the run.

    Returns a tuple ``(status, description, summary, paths)`` where
    ``status`` is "success"/"failure", ``summary`` is a list of
    (test_name, verdict) pairs, and ``paths`` lists every artifact file
    that was found.
    """
    status = "success"
    summary = []
    paths = []
    tests = ["TLPWhere", "TLPGroupBy", "TLPHaving", "TLPWhereGroupBy", "TLPDistinct", "TLPAggregate"]

    for test in tests:
        err_path = f"{result_folder}/{test}.err"
        out_path = f"{result_folder}/{test}.out"
        if not os.path.exists(err_path):
            # Test was never run.
            logging.info("No output err on path %s", err_path)
            summary.append((test, "SKIPPED"))
            continue
        if not os.path.exists(out_path):
            # Err without out: incomplete run, record nothing for this test.
            logging.info("No output log on path %s", out_path)
            continue
        paths.append(err_path)
        paths.append(out_path)
        # Any AssertionError in stderr means the oracle found a mismatch.
        with open(err_path, 'r') as err_file:
            verdict = "FAIL" if 'AssertionError' in err_file.read() else "OK"
        summary.append((test, verdict))
        if verdict == "FAIL":
            status = 'failure'

    # Optional run-level artifacts: attach each one that exists.
    optional_artifacts = [
        (f"{result_folder}/logs.tar.gz", "No logs tar on path %s"),
        (f"{result_folder}/stdout.log", "No stdout log on path %s"),
        (f"{result_folder}/stderr.log", "No stderr log on path %s"),
    ]
    for artifact_path, missing_msg in optional_artifacts:
        if os.path.exists(artifact_path):
            paths.append(artifact_path)
        else:
            logging.info(missing_msg, artifact_path)

    description = "SQLancer test run. See report"
    return status, description, summary, paths
def write_results(results_file, status_file, results, status):
with open(results_file, 'w') as f:
out = csv.writer(f, delimiter='\t')
out.writerows(results)
with open(status_file, 'w') as f:
out = csv.writer(f, delimiter='\t')
out.writerow(status)
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')

    # CLI: input folder with raw SQLancer output, plus the two report files
    # consumed by the CI check.
    parser = argparse.ArgumentParser(description="ClickHouse script for parsing results of sqlancer test")
    parser.add_argument("--in-results-dir", default='/test_output/')
    parser.add_argument("--out-results-file", default='/test_output/test_results.tsv')
    parser.add_argument("--out-status-file", default='/test_output/check_status.tsv')
    args = parser.parse_args()

    state, description, test_results, logs = process_result(args.in_results_dir)
    logging.info("Result parsed")
    write_results(args.out_results_file, args.out_status_file, test_results, (state, description))
    logging.info("Result written")
| ByConity/ByConity | docker/test/sqlancer/process_sqlancer_result.py | process_sqlancer_result.py | py | 2,749 | python | en | code | 1,352 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_n... |
1436356058 | import pathlib
import math
import pygame as pg
# bd is short for board
bd_size = 900  # board side length in pixels (square)
grid_size = bd_size // 19  # pixel size of one cell on the 19x19 board
stone_radius = grid_size // 2  # radius used when drawing a stone
cross_half_len = stone_radius // 3  # half-length of the cross marker
# Grid coordinates of the nine star points on a 19x19 board.
center_circle = [(3, 3), (3, 9), (3, 15), (9, 3), (9, 9), (9, 15),
                 (15, 3), (15, 9), (15, 15)]
circle_radius = stone_radius // 5  # radius of a star-point dot
# Resource locations, resolved relative to this file: <project>/resource/...
base_dir = pathlib.Path(__file__).parent.parent
resource_dir = base_dir / 'resource'
bg_img_path = resource_dir / 'img' / 'back.png'
bg_music_path = resource_dir / 'music' / 'back.flac'
bg_music_volume = 0.6  # background-music volume (0.0-1.0)
bg_music_diff = 0.15  # volume step -- presumably for volume up/down; confirm against callers
# Stone/player identifiers.
white_id = 0
black_id = 1
# Notice (popup) panel size, expressed in grid cells and in pixels.
notice_width_grids = 8
notice_height_grids = 10
notice_width = grid_size * notice_width_grids
notice_height = grid_size * notice_height_grids
notice_leftup_coord = (5, 4)  # top-left corner of the notice panel, in grid coordinates
# Fonts for headers and body text. NOTE(review): pg.font.get_default_font()
# only returns a font file name, so calling it at import time is safe even
# before pygame initialization.
header_font = pg.font.get_default_font()
header_size = grid_size
text_font = pg.font.get_default_font()
text_size = grid_size
class QuitException(Exception):
    """Exception type named for quitting the game.

    Carries no extra state; it exists so callers can raise and catch a
    distinct type instead of a generic Exception.
    """
{
"api_name": "pathlib.Path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.font.get_default_font",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "pygame.fon... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.