seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
71992176033 | # -*- coding: utf-8 -*-
"""
Package: iads
File: utils.py
Année: LU3IN026 - semestre 2 - 2021-2022, Sorbonne Université
"""
# Fonctions utiles pour les TDTME de LU3IN026
# Version de départ : Février 2022
# import externe
import math
import time

import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import pandas as pd
import seaborn as sns
# ------------------------
def plot2DSet(desc, labels):
    """ndarray * ndarray -> display

    Scatter-plot a 2D labelled dataset: class -1 as red circles,
    class +1 as blue crosses.
    """
    negatives = desc[labels == -1]
    positives = desc[labels == +1]
    plt.scatter(negatives[:, 0], negatives[:, 1], marker='o', color='red')
    plt.scatter(positives[:, 0], positives[:, 1], marker='x', color='blue')
# ------------------------
def plot_frontiere(desc_set, label_set, classifier, step=30):
    """ desc_set * label_set * Classifier * int -> NoneType
    Draw the decision boundary of `classifier` over the bounding box of the data.
    `step` is the grid resolution: the larger it is, the finer the frontier.
    """
    lo = desc_set.min(0)
    hi = desc_set.max(0)
    x1grid, x2grid = np.meshgrid(np.linspace(lo[0], hi[0], step),
                                 np.linspace(lo[1], hi[1], step))
    grid = np.hstack((x1grid.reshape(x1grid.size, 1),
                      x2grid.reshape(x2grid.size, 1)))
    # predict every grid point, then back to matrix form for contourf
    preds = np.array([classifier.predict(point) for point in grid])
    preds = preds.reshape(x1grid.shape)
    # colors[0] is the colour of the -1 region, colors[1] of the +1 region
    plt.contourf(x1grid, x2grid, preds,
                 colors=["darksalmon", "skyblue"], levels=[-1000, 0, 1000])
# ------------------------
def genere_dataset_uniform(p, n, binf=-1, bsup=1):
    """ int * int * float^2 -> tuple[ndarray, ndarray]
    p: number of dimensions of each description
    n: number of examples per class (2*n points are generated)
    Values are drawn uniformly in [binf, bsup].
    Returns (descriptions, labels) with the first n points labelled -1
    and the last n labelled +1.
    """
    descriptions = np.random.uniform(binf, bsup, (2 * n, p))
    labels = np.hstack((-np.ones(n, dtype=int), np.ones(n, dtype=int)))
    return descriptions, labels
# ------------------------
def genere_dataset_gaussian(positive_center, positive_sigma, negative_center, negative_sigma, nb_points):
    """Generate a 2-class dataset from two multivariate normal distributions.

    Returns a tuple (data_desc, data_labels): the first nb_points rows are
    drawn around negative_center (label -1), the next nb_points around
    positive_center (label +1).
    """
    negatives = np.random.multivariate_normal(negative_center, negative_sigma, nb_points)
    positives = np.random.multivariate_normal(positive_center, positive_sigma, nb_points)
    labels = np.hstack((-np.ones(nb_points, dtype=int), np.ones(nb_points, dtype=int)))
    return np.vstack((negatives, positives)), labels
# ------------------------
def create_XOR(n, var):
    """ int * float -> tuple[ndarray, ndarray]
    Hyp: n and var are positive.
    n: number of points per cloud (4 clouds, so 4*n points total)
    var: variance on each dimension
    Builds the classic XOR layout: clouds at (0,0) and (1,1) labelled -1,
    clouds at (1,0) and (0,1) labelled +1.
    """
    cov = np.array([[var, 0], [0, var]])
    centers = [np.array([0, 0]), np.array([1, 1]),   # class -1
               np.array([1, 0]), np.array([0, 1])]   # class +1
    clouds = [np.random.multivariate_normal(c, cov, n) for c in centers]
    descriptions = np.vstack(clouds)
    labels = np.hstack((-np.ones(2 * n, dtype=int), np.ones(2 * n, dtype=int)))
    return descriptions, labels
# ------------------------
def plot_frontiere_V3(desc_set, label_set, w, kernel, step=30, forme=1, fname="out/tmp.pdf"):
    """ desc_set * label_set * array * function * int * int * str -> NoneType
    Draw the decision function of a linear classifier given as a weight vector `w`
    applied to `kernel`-expanded inputs (the vector form gives more flexibility).
    forme selects the rendering:
        1: contour lines with level labels      2: zero-level frontier only
        3: 3D surface of the decision function  4: 3D transparent coloured wire surface
    fname: output file path, or None to skip saving.
    """
    # STEP 1: build a (step x step) grid covering the bounding box of the dataset
    mmax = desc_set.max(0)
    mmin = desc_set.min(0)
    x1grid, x2grid = np.meshgrid(np.linspace(mmin[0], mmax[0], step),
                                 np.linspace(mmin[1], mmax[1], step))
    grid = np.hstack((x1grid.reshape(x1grid.size, 1), x2grid.reshape(x2grid.size, 1)))
    # STEP 2: decision value for every grid point
    res = np.array([kernel(grid[i, :]) @ w for i in range(len(grid))])
    res = res.reshape(x1grid.shape)  # matrix form, required by contour/plot_surface
    # STEP 3: drawing
    if forme <= 2:
        fig, ax = plt.subplots()  # flat axes for forms 1 and 2
        ax.set_xlabel('X_1')
        ax.set_ylabel('X_2')
        if forme == 1:
            # 1. contour lines + their level values
            CS = ax.contour(x1grid, x2grid, res)
            ax.clabel(CS, inline=1, fontsize=10)
        if forme == 2:
            # 2. the frontier alone: level-0 contour
            CS = ax.contour(x1grid, x2grid, res, levels=[0], colors='k')
    if forme == 3 or forme == 4:
        fig = plt.gcf()
        # BUG FIX: fig.gca(projection='3d') was removed in matplotlib >= 3.6
        ax = fig.add_subplot(projection='3d')  # 3D axes for forms 3 and 4
        ax.set_xlabel('X_1')
        ax.set_ylabel('X_2')
        ax.set_zlabel('f(X)')
        if forme == 3:
            # 3. decision function as a 3D surface
            # BUG FIX: `cm` was not imported (NameError); now imported from matplotlib
            surf = ax.plot_surface(x1grid, x2grid, res, cmap=cm.coolwarm)
        if forme == 4:
            # 4. 3D surface drawn as a transparent coloured wire grid
            norm = plt.Normalize(res.min(), res.max())
            colors = cm.coolwarm(norm(res))
            rcount, ccount, _ = colors.shape
            surf = ax.plot_surface(x1grid, x2grid, res, rcount=rcount, ccount=ccount,
                                   facecolors=colors, shade=False)
            surf.set_facecolor((0, 0, 0, 0))
    # STEP 4: overlay the dataset points
    negatifs = desc_set[label_set == -1]  # class -1 examples
    positifs = desc_set[label_set == +1]  # class +1 examples
    if forme <= 2:
        ax.scatter(negatifs[:, 0], negatifs[:, 1], marker='o', c='b')  # 'o' for class -1
        ax.scatter(positifs[:, 0], positifs[:, 1], marker='x', c='r')  # 'x' for class +1
    else:
        # in 3D, pin the points at z=-1 / z=+1
        ax.scatter(negatifs[:, 0], negatifs[:, 1], -1, marker='o', c='b')
        ax.scatter(positifs[:, 0], positifs[:, 1], 1, marker='x', c='r')
    # STEP 5 (3D only): camera viewpoint
    if forme == 3 or forme == 4:
        ax.view_init(20, 70)  # tune depending on the data
    # STEP 6: save to file (tight margins, transparent background)
    if fname is not None:
        plt.savefig(fname, bbox_inches='tight', transparent=True, pad_inches=0)
# --------------------------------------
def calculCout(X, Y, ensemble):
    """Summed hinge loss of each weight vector in `ensemble` on (X, Y).

    X: (n, d) descriptions; Y: (n,) labels in {-1, +1};
    ensemble: iterable of (d,) weight vectors.
    Returns the list [sum(max(0, 1 - (X@w)*Y)) for each w].

    BUG FIX: the original used np.ones(200), hard-coding the sample count;
    it now works for any number of examples.
    """
    cost = []
    for w in ensemble:
        margins = np.dot(X, w) * Y                 # f(X).Y
        losses = np.maximum(0.0, 1.0 - margins)    # hinge: max(0, 1 - f(X).Y)
        cost.append(np.sum(losses))
    return cost
# -------------------------------------------------------------
def crossval(X, Y, n_iterations, iteration):
    """Split (X, Y) into a train/test pair for one cross-validation fold.

    The `iteration`-th contiguous block of len(Y)//n_iterations examples is
    the test set; everything else is the training set.
    Returns (Xapp, Yapp, Xtest, Ytest).
    """
    fold_size = len(Y) // n_iterations
    start = iteration * fold_size
    end = start + fold_size
    test_slice = np.s_[start:end]
    Xtest, Ytest = X[test_slice, :], Y[test_slice]
    Xapp = np.delete(X, test_slice, axis=0)
    Yapp = np.delete(Y, test_slice, axis=0)
    return Xapp, Yapp, Xtest, Ytest
def PCA(X, affichage=False, Y=None):
    """Project X onto its two principal components.

    X: (n, d) data matrix. Returns Xr of shape (n, 2).
    affichage: when True, scatter-plot the projection.
    Y: optional label vector used only to colour the plot by class.
       BUG FIX: the original read a global `Y`, raising NameError whenever
       affichage was True; Y is now an explicit (backward-compatible) parameter.
    """
    # eigen-decomposition of the (d, d) Gram matrix X^T X
    A = X.T @ X
    lam, V = np.linalg.eig(A)
    print("le nombre de valeurs propres : {}".format(len(lam)))
    # keep the 2 eigenvectors of largest eigenvalue
    sorted_index = np.argsort(lam)[::-1]
    sorted_eigenvectors = V[:, sorted_index]
    eigenvector_subset = sorted_eigenvectors[:, 0:2]
    Xr = np.dot(eigenvector_subset.transpose(), X.transpose()).transpose()
    print("Dimension de l'ensemble de départ : ", X.shape[1])
    print("Dimension de l'ensemble après transformation : ", Xr.shape[1])
    print("---")
    if affichage:
        plt.figure()
        if Y is not None:
            # one colour per class
            plt.scatter(Xr[Y == -1, 0], Xr[Y == -1, 1])
            plt.scatter(Xr[Y == 1, 0], Xr[Y == 1, 1])
        else:
            plt.scatter(Xr[:, 0], Xr[:, 1])
        plt.legend(np.arange(10))
    return Xr
def viewData(data, kde=True):
    """Plot one histogram per column of a pandas DataFrame
    (with a gaussian kernel density estimate overlaid when requested
    and the column has enough distinct values to make it interesting).

    Arguments:
        data {pandas.DataFrame} -- the DataFrame to visualise
    Keyword Arguments:
        kde {bool} -- whether to draw the gaussian kde (default: {True})
    """
    n_first = 4
    n_second = math.ceil(len(data.keys()) / n_first)
    plt.figure(figsize=(n_first * 4, n_second * 2))
    for i, column in enumerate(data.keys()):
        ax = plt.subplot(n_first, n_second, i + 1, xticklabels=[])
        ax.set_title(column)
        # the kde only makes sense with more than a handful of distinct values
        draw_kde = kde and len(data[column].unique()) > 5
        histogram = sns.histplot(data[column], kde=draw_kde, ax=ax)
        histogram.set_title("")
def normalisation(df):
    """Min-max normalise every column of `df` into [0, 1].

    Returns a new DataFrame; `df` itself is left untouched.
    BUG FIX: the original assigned into an undefined `result`, raising
    NameError on every call; `result` is now initialised as a copy of df.
    NOTE(review): a constant column still yields a division by zero
    (NaN/inf), as the original logic would have — confirm inputs vary.
    """
    result = df.copy()
    for feature_name in df.columns:
        max_value = df[feature_name].max()
        min_value = df[feature_name].min()
        result[feature_name] = (df[feature_name] - min_value) / (max_value - min_value)
    return result
def treat_outliers(d, c):
    '''
    Treat outliers by the IQR rule.
    Arguments: d - DataFrame
               c - column name (string)
    Modifies d[c] in place: values strictly outside
    [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are replaced by the median Q2.
    '''
    # Quartiles with midpoint interpolation, via pandas: np.percentile's
    # `interpolation=` keyword was removed in NumPy 2.0.
    Q1 = d[c].quantile(0.25, interpolation='midpoint')
    Q2 = d[c].quantile(0.50, interpolation='midpoint')
    Q3 = d[c].quantile(0.75, interpolation='midpoint')
    IQR = Q3 - Q1
    upper = Q3 + 1.5 * IQR
    lower = Q1 - 1.5 * IQR
    # strict comparisons, as in the original replacement logic
    d[c] = np.where(d[c] < lower, Q2, d[c])
    d[c] = np.where(d[c] > upper, Q2, d[c])
def train_cv(model, X, Y, niter=10):
    """Shuffle (X, Y) and run `niter`-fold cross-validation on `model`.

    model must expose train(X, Y) and accuracy(X, Y).
    Prints per-fold and mean train/test accuracies plus the elapsed time.
    BUG FIX: the original re-assigned `niter = 10` inside the body,
    silently ignoring the parameter; the parameter is now honoured.
    """
    index = np.random.permutation(len(X))
    Xm, Ym = X[index], Y[index]
    perf_train, perf_test = [], []
    tic = time.time()
    for i in range(niter):
        Xapp, Yapp, Xtest, Ytest = crossval(Xm, Ym, niter, i)
        model.train(Xapp, Yapp)
        perf_train.append(round(model.accuracy(Xapp, Yapp), 2))
        perf_test.append(round(model.accuracy(Xtest, Ytest), 2))
        print("Kfold {} : train {} - test {}".format(i, perf_train[i], perf_test[i]))
    toc = time.time()
    print("---")
    print("Moyenne train set: {}".format(round(np.mean(perf_train), 2)))
    print("Moyenne test set: {}".format(round(np.mean(perf_test), 2)))
    print(f"Résultat en {(toc-tic):.2} secondes")
    print("---")
| samynhl/Data-Science-Project | iads/utils.py | utils.py | py | 11,437 | python | fr | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": ... |
916189966 | # -*- coding : UTF-8 -*-
from urllib import request
from urllib import parse
import json
if __name__ == "__main__":
    # POST a translation request to Baidu's (undocumented) translate endpoint.
    Request_URL = "http://fanyi.baidu.com/v2transapi"
    Form_Data = {}
    Form_Data['from'] = 'en'
    Form_Data['to'] = 'zh'
    Form_Data['query'] = 'Android'
    Form_Data['transtype'] = 'realtime'
    Form_Data['simple_means_flag'] = '3'
    # urlencode the form, then UTF-8 encode it for the POST body
    data = parse.urlencode(Form_Data).encode('utf-8')
    response = request.urlopen(Request_URL, data)
    html = response.read().decode('utf-8')
    # BUG FIX: json.load expects a file object; html is a str, so use json.loads
    translate_result = json.loads(html)
    # BUG FIX: the response is a dict (see the commented sample layout in this
    # file), not a list, so translate_result[1][1] raised; the translation
    # lives at trans_result -> data -> [0] -> dst.
    print(translate_result["trans_result"]["data"][0]["dst"])
# translate_result = json.load(html)
# data = translate_result["data"]
# print(data)
# "trans_result":{ 1
# "from": "en",
# "to": "zh",
# "domain": "all",
# "type": 2,
# "status": 0,
# "data": [ a
# { 2
# "dst": "安卓",
# "src": "Android",
# "relation": [],
# "result": [ b
# [ c
# 0,
# "安卓",
# [ d
# "0|7"
# ] d,
# [] d,
# [ d
# "0|7"
# ] d,
# [ d
# "0|6"
# ] d
# ] c
# ]b
# }2
# ] a,
# "phonetic": [ a
# { 2
# "src_str": "安",
# "trg_str": "ān"
# } 2,
# { 2
# "src_str": "卓",
# "trg_str": "zhuó"
# } 2
# ] a
# } 1,
# "dict_result": {
# "edict": "",
# "zdict": "",
# "from": "kingsoft",
# "simple_means": {
# "word_name": "Android",
# "from": "kingsoft",
# "word_means": [
# "似人自动机",
# "机器人",
# "基于Linux平台的开源手机操作系统,主要使用于便携设备。目前尚未有统一中文名称,中国大陆地区较多人称为安卓"
# ],
# "exchange": {
# "word_third": "",
# "word_done": "",
# "word_pl": "",
# "word_est": "",
# "word_ing": "",
# "word_er": "",
# "word_past": ""
# },
| NotMyYida/AndroidBookNote | pycharmWS/webcrawler/translate.py | translate.py | py | 2,579 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib.parse.urlencode",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "urllib.req... |
21692068618 | import discord
from discord.ext import commands
import random
from discord.ext.commands.core import command
import datetime
from discord.utils import get
class Utility(commands.Cog):
    """Utility commands for setting up the environment.

    Every command persists its setting in the `guild` table keyed by guild id
    (self.bot.pg_con is presumably an asyncpg connection pool — verify).
    """
    def __init__(self,bot):
        self.bot = bot
        # colour palette shared bot-wide; each embed picks a random entry from it
        self.colors = self.bot.colors

    @commands.command()
    @commands.has_permissions(kick_members=True)
    async def setprefix(self, ctx,*,prefix='.'):
        """Set bot prefix"""
        # persist the per-guild command prefix
        await self.bot.pg_con.execute("UPDATE guild SET prefix = $1 WHERE guildid = $2",prefix,ctx.guild.id)
        await ctx.message.reply(f"PREFIX updated to `{prefix}`")

    @commands.command(aliases = ['mainchannel'])
    @commands.has_permissions(administrator=True)
    async def setmain(self,ctx,channel:discord.TextChannel):
        """Set the main channel for welcomes, updates etc"""
        await self.bot.pg_con.execute("UPDATE guild SET mainchannel = $1 WHERE guildid = $2",channel.id,ctx.guild.id)
        embed = discord.Embed(title = "Main Channel Updated",description = f"Main channel has been set to {channel.mention}",color = random.choice(self.colors))
        await ctx.send(embed = embed)

    @commands.command(aliases = ['sugchannel'])
    @commands.has_permissions(administrator=True)
    async def setsuggestion(self,ctx,channel:discord.TextChannel):
        """Set a channel for sending suggestions (in order to use the `suggest` command)"""
        # fetch the guild's prefix so the confirmation shows the correct syntax
        prefix = await self.bot.pg_con.fetchrow("SELECT prefix FROM guild WHERE guildid = $1",ctx.guild.id)
        prefix = prefix[0]
        await self.bot.pg_con.execute("UPDATE guild SET suggestionchannel = $1 WHERE guildid = $2",channel.id,ctx.guild.id)
        embed = discord.Embed(title = "Suggestion Channel Updated",description = f"Suggestion channel has been set to {channel.mention}\nNow Type `{prefix}suggest + [suggestion]` without these '[',']' to Send a suggestion into the channel and start a Poll",color = random.choice(self.colors))
        await ctx.send(embed = embed)

    @commands.command(aliases = ['aichannel'])
    @commands.has_permissions(administrator=True)
    async def setaichatbot(self,ctx,channel:discord.TextChannel):
        """Set AI-chatbot channel to chat with AI"""
        await self.bot.pg_con.execute("UPDATE guild SET aichannel = $1 WHERE guildid = $2",channel.id,ctx.guild.id)
        embed = discord.Embed(title = "AI-chatbot Channel Updated",description = f"Now Enjoy talking with AI chabot in {channel.mention}\nYou can start talking to the Chatbot now. Why not start with sayin 'HI' or something :)",color = random.choice(self.colors))
        await ctx.send(embed = embed)

    @commands.command(aliases = ['logchannel'])
    @commands.has_permissions(administrator=True)
    async def setlogs(self,ctx,channel:discord.TextChannel):
        """Set logs channel to get update with logs"""
        await self.bot.pg_con.execute("UPDATE guild SET logchannel = $1 WHERE guildid = $2",channel.id,ctx.guild.id)
        embed = discord.Embed(title = "Log Channel Updated",description = f"Log channel has been set to {channel.mention}\nNow get the Information regarding the status, activity, Nickname, Roles, Pending of every person in the server",color = random.choice(self.colors))
        await ctx.send(embed = embed)

    @commands.command(aliases = ['remove_logs'])
    @commands.has_permissions(administrator=True)
    async def remove_log(self,ctx):
        """Remove the logs channel"""
        prefix = await self.bot.pg_con.fetchrow("SELECT prefix FROM guild WHERE guildid = $1",ctx.guild.id)
        prefix = prefix[0]
        # NULL out the stored channel id to deactivate the feature
        await self.bot.pg_con.execute("UPDATE guild SET logchannel = $1 WHERE guildid = $2",None,ctx.guild.id)
        await ctx.send(embed = discord.Embed(title = "Logging Turned Off",description = f"Logging Has been Deactivated, This server won't be updated with the user's activity, status, roles, nickname anymore\nType `{prefix}set_logs #log_channel` to start enojoying the Features again ",color = random.choice(self.colors)))

    @commands.command(aliases = ['remove_suggestion'])
    @commands.has_permissions(administrator=True)
    async def remove_sug(self,ctx):
        """Remove the suggestion channel and the command"""
        prefix = await self.bot.pg_con.fetchrow("SELECT prefix FROM guild WHERE guildid = $1",ctx.guild.id)
        prefix = prefix[0]
        await self.bot.pg_con.execute("UPDATE guild SET suggestionchannel = $1 WHERE guildid = $2",None,ctx.guild.id)
        await ctx.send(embed = discord.Embed(title = "Suggestion Command Turned Off",description = f"Suggestions Have been Deactivated, You won't be able to use Suggestion command in 'tis server now\nType `{prefix}set_suggestion #suggestion_channel` to start enojoying the Command again",color = random.choice(self.colors)))

    @commands.command(aliases = ['remove_aichatbot'])
    @commands.has_permissions(administrator=True)
    async def remove_aibot(self,ctx):
        """Remove the AI-chatbot channel"""
        prefix = await self.bot.pg_con.fetchrow("SELECT prefix FROM guild WHERE guildid = $1",ctx.guild.id)
        prefix = prefix[0]
        await self.bot.pg_con.execute("UPDATE guild SET aichannel = $1 WHERE guildid = $2",None,ctx.guild.id)
        await ctx.send(embed = discord.Embed(title = "AI-Chatbot Turned Off",description = f"AI-chabot features Have been Deactivated, You won't be able to enjoy Chatbot Features now\nType `{prefix}set_aichatbot #Aichatbot_channel` to start enojoying the features again",color = random.choice(self.colors)))

    @commands.command(aliases = ['remove_general'])
    @commands.has_permissions(administrator=True)
    async def remove_main (self,ctx):
        """Remove the general/main channel"""
        prefix = await self.bot.pg_con.fetchrow("SELECT prefix FROM guild WHERE guildid = $1",ctx.guild.id)
        prefix = prefix[0]
        await self.bot.pg_con.execute("UPDATE guild SET mainchannel = $1 WHERE guildid = $2",None,ctx.guild.id)
        await ctx.send(embed = discord.Embed(title = "Main-Channel features Turned Off",description = f"Now you won't be getting any welcome commands, leave-join events, drops etc\nType `{prefix}set_main #main_name` to enjoy these features again ",color = random.choice(self.colors)))

    @commands.command(aliases = ['invite'])
    async def join(self,ctx):
        """Join our main channel or invite this bot"""
        embed = discord.Embed(title = "Invite",timestamp = datetime.datetime.utcnow(),color = random.choice(self.colors))
        embed.add_field(name="Invite This Bot to your server",value="[Click Here to Invite](https://discord.com/api/oauth2/authorize?client_id=781737639321141268&permissions=8&scope=bot%20applications.commands)",inline=False)
        embed.add_field(name="Join our Main Server",value="[Click Here to join](https://discord.gg/cRGWDtu3W8)",inline=False)
        embed.set_image(url=self.bot.user.avatar_url)
        embed.set_thumbnail(url = "https://i.imgur.com/j3x5FDB.png")
        await ctx.send(embed = embed)

    @commands.command(aliases = ['createmute'])
    @commands.has_permissions(administrator=True)
    async def mutedrole(self,ctx):
        """Create the muted role to use mute command"""
        try:
            # if a "Muted" role already exists, just refresh its channel overrides
            muted = get(ctx.guild.roles,name="Muted")
            for channel in ctx.guild.channels:
                await channel.set_permissions(muted, send_messages = False)
            await ctx.send("Muted Role Existed, Purged it's Perms to send Messages into any channel")
        except:
            # NOTE(review): bare except — when get() returns None the
            # set_permissions call raises and lands here; an explicit None
            # check would be clearer and not mask unrelated errors.
            await ctx.guild.create_role(name = "Muted",colour = 0xFF0C00)
            muted = get(ctx.guild.roles, name="Muted")
            for channel in ctx.guild.channels:
                await channel.set_permissions(muted, send_messages = False)
            await ctx.send("Muted Role Created! , Purged it's Perms as well to send Messages into any channel")
def setup(bot):
    """Entry point used by discord.py's load_extension to register this cog."""
    bot.add_cog(Utility(bot))
| Navneety007/DoppleGanger | cogs/utility.py | utility.py | py | 8,130 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 15,
"usage_type": "call"
},
{
"api... |
36976850467 | # -*- coding: utf-8 -*-
###########################################################
# Send data from a simple device (raspberry, gpu, watch) to another device
# version 0.5
# syntaxe:
# python this_script_name remote_ip dataname [value] or python this_script_name start_server
# eg: python this_script_name 192.168.0.4 HeadTouched
# will return "1"
# eg: python this_script_name 192.168.0.4 step 5
# will return set the data step to "5"
# Aldebaran Robotics (c) 2008 All Rights Reserved
# NB:
# To kill python processus on windows:
# taskkill /im python.exe /f
###########################################################
"""
cd dev\git\protolab_group\scripts\versatile_almemory\
python versatile.py start_server
"""
# Optional dependencies: the module stays importable without them, only the
# methods that use them will fail.
# FIX: narrowed the bare `except:` to ImportError so unrelated errors
# (keyboard interrupts, broken installs raising something else) still surface.
try:
    import cv2
except ImportError:
    print( "WRN: OpenCV2 not found, using cv2 methods will fail" )
try:
    import numpy as np
except ImportError:
    print( "WRN: numpy not found, using numpy methods will fail" )
import socket
import struct
import sys
import time
def myOrd(a):
    """Normalise a one-byte item to its integer value.

    Under Python 2 indexing a byte string yields a 1-char str, so ord() is
    required; under Python 3 indexing bytes already yields an int.
    """
    return ord(a) if sys.version_info[0] < 3 else a
def stringToBytes(s):
    """
    Convert a string to bytes, whatever the Python version.

    Already-bytes input (e.g. when recursing into a list whose elements were
    converted before) is returned unchanged.  Under Python 2, str is already
    a byte string and is returned as-is.
    Raises UnicodeEncodeError on non-ASCII characters, exactly like the
    original per-character struct.pack('c', ...) loop did.
    """
    # due to recursing into list, if the string is already bytified, do nothing!
    if not isinstance(s, str):
        return s
    if sys.version_info[0] < 3:
        return s
    # single encode() call replaces the original O(n^2) byte-by-byte
    # concatenation loop; the result is byte-for-byte identical
    return s.encode('ascii')
def _waitSizedData( socket_object, bVerbose = False ):
    """
    wait for an int little endian formatted, followed by this amount of data
    return (data (without size field), size)
    data can be None, meaning the connection has been closed
    """
    # --- read the 4-byte little-endian size header ---
    try:
        data = socket_object.recv(4) # [Errorno 104]: Connection reset by peer !
    except Exception as err:
        print( "ERR: Versatile._waitSizedData: exception(1): %s" % str(err) )
        return None, 0
    nCptNoData = 0
    while( len(data) < 4 ):
        #~ print( "DBG: versatile._waitSizeData. data too short and receiving zero ?!? (end cnx?)" )
        # NOTE(review): this retry discards the partial header bytes already
        # read and recv()s 4 fresh ones, which can desynchronise the stream
        # if the header arrived split across packets — confirm.
        time.sleep( 0.4) # warning, all server is blocked at this point !!! (at least on windows)
        data = socket_object.recv(4)
        nCptNoData += 1
        if nCptNoData > 2:
            # client presumed gone after ~1.2s without a full header
            print( "WRN: versatile._waitSizeData. client '%s' disappear, closing..." % str(socket_object.getpeername() ) )
            return None, 0
    #~ print( "DBG: Versatile._waitSizedData: 4 bytes received: '%s' (len:%d)" % (str(data),len(data)) )
    nWaitedSize = struct.unpack( "<I", data )[0]
    #print( "DBG: Versatile._waitSizedData: packet waited size: %s" % str(nWaitedSize) )
    nSize = 0
    if 0:
        # (disabled) byte-by-byte reading variant, kept for reference
        data = []
        while nSize < nWaitedSize:
            data.append( socket_object.recv(1) )
            nSize += 1
            #~ print( "DBG: Versatile._waitSizedData: packet current size: %d/%d" % (nSize,nWaitedSize) )
    else:
        # sanity cap: refuse absurd sizes (likely a desynchronised stream)
        if nWaitedSize > 3*1024*1024: # max 3 Mo
            print( "ERR: packet to big, error reading? (size:%s)" % str(nWaitedSize) )
            print( "data: %s" % str(data) )
            if 1:
                # best-effort flush of the garbled stream before giving up
                databuf = socket_object.recv(1024*1024) # flushing a fair amount #recvall ?
                print( "DBG: flushed: %s" % len(databuf) )
                time.sleep(1)
            return None,0
        try:
            data = socket_object.recv(nWaitedSize) # TODO: is there a timeout that we can increase? (to prevent the patch below)
            if len(data) != nWaitedSize:
                if 0: print( "WRN: Versatile._waitSizedData: size is different %s!=%s (will continue to read, until ok)" % (nWaitedSize, len(data)))
                if 1:
                    # keep reading until the whole payload has arrived
                    while len(data)< nWaitedSize:
                        data += socket_object.recv(nWaitedSize-len(data))
                        time.sleep(0.001)
        except Exception as err:
            print( "ERR: Versatile._waitSizedData: exception: %s" % str(err) )
            print( "DBG: Versatile._waitSizedData: packet waited size: %s" % str(nWaitedSize) )
            if 0:
                # (disabled) attempt to reopen the connection after a read error
                ip, port = socket_object.getpeername()
                print( "ip: %s" % ip )
                print( "port: %s" % str(port) )
                print( "WRN: versatile._waitSizeData. client '%s' error, reopening..." % str( ) )
                socket_object.shutdown(socket.SHUT_RDWR);
                socket_object.close()
                socket_object.connect((ip,port))
            return None, 0 # Alma 2018-10-23: adding this return, not so sure...
    #~ print( "DBG: Versatile._waitSizedData: data: %s (len:%d)" % (str(data),len(data) ) )
    #~ print(dir(socket_object))
    # NOTE(review): on the live (else) path above, nSize is never updated, so
    # the returned size is always 0 — callers must rely on len(data) instead.
    return ( data, nSize )
class Versatile:
"""
Send data from a simple device (raspberry, gpu, watch) to a robot thru ALMemory.
Datas are passed thru socket (default port is 10001) using this packet format:
(using little endian format)
0: Packet size: size of the packet excluding this field - DWORD
4: Command or type - CHAR
0: ping - check working - will always return a value 1
1: value. followed by a value. eg, an answer to some command...
2: get value from a named variables container (eg a STM). followed by a string (cf string)
3: set value to a named variables container (eg a STM). followed by a string and a value (cf value multi type style) - will return True or False if error
4: ask for a new client ID - (no param) - return a string
5: set client ID (for future call) - (param: id value (string))
6: set a parameter related to this client - (param: parameter_name, parameter_value)
7: get this parameter - (param: parameter_name)
10: register for an image stream. followed by a camera index (0..255), a resolution (see VersatileResolution), an image format (see VersatileImageFormat) and a required fps (1..1000).
11: unregister from an image stream. followed by the camera index, resolution and format (NB: you can't ask twice for the same (camera,res,and format)
20: register for a sound stream
21: unregister from the sound stream
30: get a broadcasted image - followed by a broadcaster id
5: Command parameters
length dependant of the command
# all packet send will be acknowledged with at least a 1
"""
nCommandType_EOC = -1
nCommandType_Ping = 0
nCommandType_Value = 1
nCommandType_Get = 2
nCommandType_Set = 3
nCommandType_CreateClientID = 4
nCommandType_SetClientID = 5
nCommandType_SetClientParam = 6
nCommandType_GetClientParam = 7
nCommandType_SubscribeCamera = 10
nCommandType_UnsubscribeCamera = 11
nCommandType_SubscribeSound = 20
nCommandType_UnsubscribeSound = 21
nCommandType_GetBroadcasted = 30
@staticmethod
def isCommandRequiringDataName(nNumCommand):
    """True when the command's packet carries a data-name argument."""
    return nNumCommand in (
        Versatile.nCommandType_Get,
        Versatile.nCommandType_Set,
        Versatile.nCommandType_GetClientParam,
        Versatile.nCommandType_SetClientParam,
        Versatile.nCommandType_GetBroadcasted,
    )
@staticmethod
def isCommandRequiringValue(nNumCommand):
    """True when the command's packet carries a value argument."""
    return nNumCommand in (
        Versatile.nCommandType_Value,
        Versatile.nCommandType_Set,
        Versatile.nCommandType_SetClientID,
        Versatile.nCommandType_SetClientParam,
    )
@staticmethod
def commandNumToLibelle(nNumCommand):
    """Return the human-readable name of a command number."""
    if nNumCommand == -1:
        return "EOC"
    listLibelle = [
        "Ping",
        "Value",
        "Get",
        "Set",
        "CreateClientID",
        "SetClientID",
        "SetClientParam",
        "GetClientParam",
        "SubscribeCamera",
        "UnsubscribeCamera",
        "SubscribeSound",
        "UnsubscribeSound",
        "GetBroadcasted",
    ]
    if nNumCommand >= len(listLibelle):
        return "Unknown command"
    return listLibelle[nNumCommand]
class VersatileString:
    """
    a string serialised as a length-prefixed record:
    0: length of the string in bytes (DWORD, little endian)
    4: string (...)
    """
    def __init__( self, s = "" ):
        self.s = s  # the wrapped string
    def set( self, s ):
        self.s = s
    def toPacket( self ):
        """
        convert object to string (wire format: DWORD length + raw characters)
        """
        data = struct.pack('<I', len(self.s)) # '<' => little endian
        #~ data += self.s
        # python 3 wants explicit conversion (that's not so silly)
        #~ data += struct.pack("s", self.s.encode('ascii'))
        #~ for i in range(len(self.s)):
        #~     data += struct.pack( 'c', self.s[i].encode('ascii') ) # 'UTF-8'
        data += stringToBytes( self.s )
        #~ print( "DBG: len data: %s" % len(data) )
        return data
    @staticmethod
    def fromPacket( data, nOptionnalSize = -1 ):
        """
        decode a string from a packet
        return value, size used by the value in data
        nOptionnalSize: when given, the caller already consumed the 4-byte
        length field, so data starts directly with the characters.
        """
        if nOptionnalSize == -1:
            nSize = struct.unpack_from("<I",data)[0]
            nOffset = 4
        else:
            nSize = nOptionnalSize
            nOffset = 0
        if sys.version_info[0] < 3:
            # python 2: bytes slicing already yields a str
            s = data[nOffset:nOffset+nSize]
        else:
            # python 3: decode character by character
            s = ""
            #~ print(" nSize: %s" % nSize )
            for i in range(nSize):
                c = struct.unpack_from("<c",data[nOffset+i:nOffset+nSize+i])[0]
                #~ print( "c: %s" % c )
                c = c.decode('UTF-8')
                s += str(c)
        # NOTE(review): the returned consumed-size is nSize+4 even when
        # nOptionnalSize was supplied and no length field was read here —
        # confirm callers account for that.
        return s, nSize+4
    # class VersatileString - end
@staticmethod
def autodetectType( v ):
    # Guess the VersatileValue type code of a raw python value.
    # Returns 0 (None/unknown), 1 (int/bool/long), 2 (float), 3 (string),
    # 4 (VersatileImage, duck-typed), 9 (list/tuple).
    #~ print( "DBG: type v: %s" % str(type(v)) )
    if v == None:
        return 0
    if isinstance( v, int ) or isinstance( v, bool ):
        return 1
    if sys.version_info[0] < 3: # in python3 int type has now an unlimited length
        if isinstance( v, long ):
            if v > 0x7FFFFFFF:
                # values wider than 32 bits lose precision in the int wire format
                print( "WRN: Versatile: truncation of long to int, value: %d" % v )
            return 1
    if isinstance( v, float ):
        return 2
    if isinstance( v, str ): # NB: True for both Unicode and byte strings # basestring change so str to switch to python3
        return 3
    # old-style instances defeat isinstance(v, Versatile.VersatileImage),
    # hence the attribute-based duck-typing check below
    #~ print(dir(v))
    #~ print(hasattr(v,"bIsGrey"))
    #~ print("str: %s" % str(str(v)) )
    if( hasattr(v,"bIsGrey") or hasattr(v,"aCommand") ): # CRADO, but only way to check it's a VersatileImage
        return 4
    if isinstance( v, list ) or isinstance( v, tuple ) :
        return 9
    print( "WRN: Versatile.autodetectType: unable to detect type of '%s' (current type: %s)" % ( str(v), str(type(v)) ) )
    return 0
class VersatileValue:
    """
    a value stored in a string compound of a length+type+data.
    0: length the length taken by the data (without the type) in byte (DWORD)
    4: type (BYTE)
        1: int (or boolean converted to int)
        2: float
        3: string
        4: image
        5: audio chunk
        9: list of versatile values
        ...
    5: data (...)
    """
    nValueType_None = 0
    nValueType_Int = 1
    nValueType_Float = 2
    nValueType_String = 3
    nValueType_Image = 4 # (see VersatileImage)
    nValueType_AudioChunk = 5 # (see VersatileAudioChunk)
    nValueType_List = 9 # list of VersatileValue

    def __init__( self, value = None, nNumEnsureType = None ):
        self._set( value, nNumEnsureType )

    def _set( self, value, nNumEnsureType ):
        # Internal setter shared by __init__ and set().
        if isinstance( value, Versatile.VersatileValue ):
            # construct from another VersatileValue: copy value and type
            self.v = value.v
            assert( nNumEnsureType == None ) # can't force a type when passing a VersatileValue
            self.nType = value.nType
            return
        self.v = value
        if nNumEnsureType == None:
            # no explicit type: auto detect from the python value
            nNumEnsureType = Versatile.autodetectType(value)
        self.nType = nNumEnsureType
        # booleans travel on the wire as ints (0/1)
        if isinstance( self.v, bool ):
            if self.v: self.v = 1
            else: self.v = 0

    def set( self, value, nNumEnsureType = None ):
        self._set( value, nNumEnsureType )

    def toPacket( self ):
        """
        convert object value to string (wire format: DWORD length + BYTE type + data)
        """
        if 0: print( "DBG: VersatileValue.toPacket: packeting a value of type %s, value: '%s'" % ( str(self.nType), str(self.v) ) )
        #print struct.calcsize('B') #1
        if self.v == None:
            # None: zero-length payload, type byte only
            data = struct.pack('<I', 0) + struct.pack('B', self.nType)
        else:
            # TODO: perhaps explictly encode data using struct.pack?
            if self.nType == Versatile.VersatileValue.nValueType_Image:
                # images know how to serialise themselves
                vAsString = self.v.toPacket()
            elif self.nType == Versatile.VersatileValue.nValueType_List:
                # lists: DWORD element count followed by each element packetised
                vAsString = struct.pack('<I', len(self.v) )
                for i in range(len(self.v)):
                    valueToEncode = self.v[i]
                    # sometimes VersatileValue can be lazily constructed with raw
                    # python values inside (e.g. a list of ints); wrap them here
                    if not isinstance( valueToEncode, Versatile.VersatileValue ):
                        valueToEncode = Versatile.VersatileValue(valueToEncode)
                    vAsString += valueToEncode.toPacket()
            else:
                # int/float/string: textual representation
                # TODO: explicit encoding for string ?
                vAsString = str(self.v)
            data = struct.pack('<I', len(vAsString)) + struct.pack('B', self.nType) + stringToBytes(vAsString)
        return data

    @staticmethod # comme staticmethod, but permits using static class variable nValueType_Int # but some stuffs are then changed.
    def fromPacket( data ):
        """
        decode a value from a packet
        return value, size used by the value in data
        """
        #~ for i in range(min(len(data),8)):
        #~     print( "DBG: data[%d] = %x" % ( i, myOrd(data[i])) )
        nSize = struct.unpack_from('<I',data)[0]
        nType = struct.unpack_from('B',data[4:4+1])[0]
        nIdxStart = 5
        nIdxEnd = 5+nSize
        # TODO: perhaps explictly decode data using struct.unpack?
        # NOTE(review): the None case is a bare `if` while the rest is an
        # if/elif chain, so a None-typed packet also falls into the final
        # `else` and prints a spurious "unknown value type" — confirm intent.
        if nType == Versatile.VersatileValue.nValueType_None:
            value = None
        if nType == Versatile.VersatileValue.nValueType_Int:
            value = int(data[nIdxStart:nIdxEnd])
        elif nType == Versatile.VersatileValue.nValueType_Float:
            value = float(data[nIdxStart:nIdxEnd])
        elif nType == Versatile.VersatileValue.nValueType_String:
            value = Versatile.VersatileString.fromPacket(data[nIdxStart:nIdxEnd],nSize)[0]
        elif nType == Versatile.VersatileValue.nValueType_Image:
            value = Versatile.VersatileImage.fromPacket(data[nIdxStart:nIdxEnd]) # nSize from fromPacket isn't used ?!?
        elif nType == Versatile.VersatileValue.nValueType_List:
            # DWORD element count, then each element decoded in sequence
            nNbrElement = struct.unpack_from('<I',data[nIdxStart:nIdxStart+4])[0]
            nIdxStart += 4
            value = []
            for i in range(nNbrElement):
                v,size = Versatile.VersatileValue.fromPacket(data[nIdxStart:]) # we don't know the size here
                nIdxStart += size
                value.append( v )
        else:
            print( "ERR: VersatileValue.fromPacket, unknown value type: %s" % str(nType) )
        #~ print( "DBG: VersatileValue.fromPacket, decoded value of type %s: %s" % (str(nType), str(value) ) )
        return value, nSize+5
    # class VersatileValue - end
class VersatileImage:
"""
store an image and its metadata
"""
class WantedResolution:
# client ask for a resolution that will be matched as nearest possible
QQVGA=0
QVGA=1
VGA=2
x960=3 # as: 1024x960
x1024=4 # as: 1280x1024
x1280=5 # as: 1920x1080
class Format:
GREY=1
JPG=2
PNG=3
RAW=4
def __init__( self, bVerbose = False ):
self.data = None # image data ready for sending (one dimension, string encoded, ...)
self.aCommand = [] # you can add a list of parameters # see addCommand for doc
self.bVerbose = bVerbose
self.bIsGrey = False
self.timeStamp = 0,0
self.data = ""
def createFromCvImage( self, img, timeStamp = None, nFormat = Format.JPG ):
"""
create a VersatileImage from a cv2.buffer
"""
if img is None:
return False
# reducing image
while img.shape[0]>1080:
print("WRN: versatile.createFromCvImage: reducing image..." )
img = cv2.resize( img, (img.shape[1]//2,img.shape[0]//2) )
if timeStamp == None:
timeStamp = time.time()
self.timeStamp = int(timeStamp), int(timeStamp*1000)%1000 # on 2 int: seconds & ms
self.bIsGrey = (nFormat == Versatile.VersatileImage.Format.GREY)
#self.data = img.reshape( self.nW*self.nH*nChannel,1,1).tostring()
# encoding:
#~ print( "DBG: createFromCvImage: encoding format is %d" % nFormat )
encodeParam = None
if nFormat == Versatile.VersatileImage.Format.PNG:
strFormat = ".png"
if nFormat == Versatile.VersatileImage.Format.JPG:
strFormat = ".jpg"
encodeParam = [int(cv2.IMWRITE_JPEG_QUALITY),85]
#~ print( "DBG: createFromCvImage: strFormat is %s" % strFormat )
#~ print( "DBG: createFromCvImage: self.bIsGrey is %s" % self.bIsGrey )
timeBegin = time.time()
self.data = cv2.imencode( strFormat, img, encodeParam )[1].tostring()
if 1:
# encryption
pass
#~ print( "INF: VersatileImage.createFromCvImage: encoding image takes: %5.3fs" % (time.time()-timeBegin) ) # 0.01 on Pepper in VGA
#print( "self.img in: %s" % str(img.size) )
#print( "DBG: VersatileImage.createFromCvImage: compression out: %s in: %s" % ( len(self.data), img.size ) )
def convertToCvImage( self ):
"""
return a cv2.buffer from a VersatileImage
"""
#return np.fromstring(self.data, np.uint8).reshape( self.nH,self.nW,nChannel)
if self.bIsGrey:
flag = 0
else:
try:
flag = cv2.CV_LOAD_IMAGE_COLOR # cv2 flag
except:
flag = cv2.IMREAD_COLOR # cv3 flag
#~ print( "DBG: convertToCvImage: flag is %s" % flag )
#~ print( "DBG: convertToCvImage: type data: %s" % type(self.data) )
#~ print( "DBG: convertToCvImage: len data: %s" % len(self.data) )
if len(self.data) == 0:
# empty image for some specific command
print("WRN: convertToCvImage: received an empty image - possible for some command like clear_db...")
img = None
else:
img = cv2.imdecode(np.fromstring(self.data, np.uint8), flag )
#print( "DBG: VersatileImage.convertToCvImage: decoded size: %s" % ( img.size ) )
return img
def addCommand( self, listParameter ):
"""
add a list of command and parameters to this image.
The remote will send information related to this work
eg:
- ["facedetection"] => list face and confidence as an answer
- ["facerecognition", "db_name", "learn", "some_name"] => True/False and confidence on learning (based on comparison on existing face)
- ["facerecognition", "db_name", "recognise"] => recognition result
- ["store"] => store this image on some disk
- ["show"] => show this image on screen
- ["broadcast"] => show this image to any viewer
"""
for param in listParameter:
if self.bVerbose: print( "INF: VersatileImage.addCommand: adding '%s'" % str(param) )
self.aCommand.append(param)
def toPacket( self ):
"""
convert object value to string
format: B: is grey, size of data_img, data_img
"""
#~ print( "DBG: VersatileValue.toPacket: packeting a value of type %s, value: '%s'" % ( str(self.nType), str(self.v) ) )
data = struct.pack('B', self.bIsGrey) \
+ struct.pack('<I', self.timeStamp[0]) \
+ struct.pack('<I', self.timeStamp[1]) \
+ struct.pack('<I', len(self.data)) \
+ stringToBytes(self.data)
data += Versatile.VersatileValue( self.aCommand ).toPacket()
return data
@staticmethod
def fromPacket( data ):
"""
decode a value from a packet
return aVersatileImage, size used by the value in data
"""
vi = Versatile.VersatileImage()
offset = 0
vi.timeStamp = [0,0]
if sys.version_info[0] == 3:
vi.bIsGrey = data[offset]; offset += 1
else:
vi.bIsGrey = struct.unpack_from('B',data[offset])[0]; offset += 1
vi.timeStamp[0] = struct.unpack_from('<I',data[offset:offset+4])[0] ; offset += 4
vi.timeStamp[1] = struct.unpack_from('<I',data[offset:offset+4])[0] ; offset += 4
nSize = struct.unpack_from('<I',data[offset:offset+4])[0] ; offset += 4
#vi.data = struct.unpack_from('B'*nSize,data[offset:offset+nSize]) # $$$$ found method to get a variable decoding!
vi.data=data[offset:offset+nSize] ; offset += nSize
#print( "datalen: %s" % (len(vi.data)) )
param, sizeParams = Versatile.VersatileValue.fromPacket(data[offset:])
print( "DBG: Versatile Image: decoded params: ")
for i in range( len(param) ):
print( "\t%s" % param[i])
vi.aCommand = param
offset += sizeParams
print( "DBG: Versatile Image: decoded params - end")
return vi, offset
#class VersatileImage - end
@staticmethod
def VersatileImage_autoTest():
print( "INF: VersatileImage_autoTest: starting..." )
vi = Versatile.VersatileImage()
w=640
h = 480
img = np.zeros((h,w,3), np.uint8)
for j in range(h):
for i in range(w):
img[j,i,0] = i*(j+3)
img = cv2.imread( "../camera_viewer_at_exit.png" )
vi.createFromCvImage( img, nFormat = Versatile.VersatileImage.Format.PNG)
imgOut = vi.convertToCvImage()
assert( np.array_equal( img, imgOut) )
vi.createFromCvImage( img, nFormat = Versatile.VersatileImage.Format.JPG)
imgOut = vi.convertToCvImage()
assert( not np.array_equal( img, imgOut) ) # different due to compression loss
vi.createFromCvImage( img, nFormat = Versatile.VersatileImage.Format.PNG)
data=vi.toPacket()
vi, size = Versatile.VersatileImage.fromPacket(data)
imgOut = vi.convertToCvImage()
assert( np.array_equal( img, imgOut) )
print( "INF: VersatileImage_autoTest: OK\n" )
#########################################################
# class Versatile method - start
def __init__( self, nPort = 10001, bVerbose = False ):
"""
"""
self.bVerbose = bVerbose
self.nPort = nPort
self.bMustStop = False
self.threadSendImage=None
self.aCameraSubs=[] # for each: client_socket,camera idx, reso, format, period, time last send
self.dictClientID = dict() # a dict adress of client_socket => logical client ID (as returned by nCommandType_CreateClientID and set by nCommandType_SetClientID
# NB: logical client ID is internally a number!!!
self.dictClientParams = dict() # a dict logical client ID => a dict of parameter (a dict of dict)
self.dictBrodcastedImages = dict() # a dict logical client ID => a list of stored versatileimages
def setVerbose( self, bVal ):
self.bVerbose = bVal
def registerToCamera( self, client_socket, nNumCamera, nResolution, nFormat, rPeriod = 0.2 ):
print( "INF: client %s register for a camera (num camera: %d, res: %d, format: %d, period: %5.2fs)" % ( str(client_socket.getpeername()),nNumCamera, nResolution, nFormat, rPeriod) )
self.aCameraSubs.append( [client_socket, nNumCamera, nResolution, nFormat, rPeriod, time.time()-100] )
#self.startThreadSendImage() # can't prevent launching many => done at construction!
def getNewImage( self, nNumCamera, nResolution, nFormat ):
"""
return (time stamp, image)
please inherits this method in your own program
"""
img = np.zeros((480,640,3), np.uint8)
inc = int(time.time()*100)
x = (inc%160)
y = (inc/160)%120
for i in range(10):
img[y,x+i,0] = 255 # make it move
img[y,x+i,1] = 255 # make it move
img[y,x+i,2] = 255 # make it move
return (time.time(),img)
def threadSendImageToClient( self ):
import threading
print( "DBG: threadSendImageToClient: start..." )
while( 1 ):
for i in range( len( self.aCameraSubs ) ):
client_socket, nNumCamera, nResolution, nFormat, rPeriod, timeLastSend = self.aCameraSubs[i]
if time.time() - timeLastSend >= rPeriod:
self.aCameraSubs[i][-1] = time.time()
ts, img = self.getNewImage( nNumCamera, nResolution, nFormat )
vi = Versatile.VersatileImage()
vi.createFromCvImage( img, ts )
vv = Versatile.VersatileValue( vi )
retVal = self._send( Versatile.nCommandType_Value, None, vv, optionnalClient = client_socket )
if retVal == False:
print( "INF: threadSendImageToClient: removing client %d" % i )
try:
del self.aCameraSubs[i]
except BaseException as err:
print( "WRN: error while deleting: %s" % str(err) )
break; # all indexs are offseted, let's restart iteration...
# HERE, we could finish this thread if no more subs
time.sleep(0.01)
#time.sleep(1.) # to debug
#~ print( "DBG: threading.currentThread().getName(): %s" % threading.currentThread().getName())
#~ print(".")
def startThreadSendImage( self ):
# todo: mutex
print( "DBG: startThreadSendImage: start" )
import multiprocessing
import threading
if self.threadSendImage != None:
print( "DBG: startThreadSendImage: already started !!!" )
return
#~ print(dir(multiprocessing))
# self.threadSendImageToClient() # pour tester sans multithreading (mais va locker apres le premier client)
#self.threadSendImage = multiprocessing.Process( target=self.threadSendImageToClient ).start() # on windows, objects are pickled and socket is'nt picklable so, raise errors
self.threadSendImage = True # to be sure
self.threadSendImage = threading.Thread( target=self.threadSendImageToClient ).start() # under windows ne semble pas lancer dans un thread, mais bloque tout le thread
print( "DBG: startThreadSendImage: end" )
def handleCommand( self, command, command_parameters, clientSocket = None ):
"""
please inherits this method in your own program.
return None on error
"""
if self.bVerbose: print( "DBG: Versatile.handleCommand: received command: %s,%s" % (command, command_parameters) )
if not hasattr(self, "simulatedMemory"):
self.simulatedMemory = dict() # to store command
if command == Versatile.nCommandType_Ping:
return 1
if command == Versatile.nCommandType_Value:
print( "WRN: Versatile.handleCommand: Abnormal case: Received this value: %s" % command_parameters )
return None
if command == Versatile.nCommandType_Get:
#valueFromALMemory = "(a value taken from ALMemory data named: '%s')" % command_parameters[0]
#valueFromALMemory = "toto"
strDataName = command_parameters[0]
try:
valueFromALMemory = self.simulatedMemory[strDataName]
except:
if self.bVerbose: print( "DBG: data '%s' isn't in the Memory" % (strDataName) )
valueFromALMemory = None # non existing value
return valueFromALMemory
if command == Versatile.nCommandType_Set:
strDataName = command_parameters[0]
value = command_parameters[1]
if self.bVerbose: print( "DBG: data '%s' should be set to %s" % (strDataName, value) )
self.simulatedMemory[strDataName] = value
return 1
if command == Versatile.nCommandType_CreateClientID:
time.sleep(0.5)
nNewClientID = int(time.time()*100)%0x80000000 # the goal is to ensure there won't be two client with same ID. (but that id is storable on a int32)
print( "INF: Versatile.handleCommand: creating a new client ID: %s" % nNewClientID )
return str(nNewClientID)
if command == Versatile.nCommandType_SetClientID:
value = str(command_parameters[0])
if id(clientSocket) in self.dictClientID.keys():
if self.dictClientID[id(clientSocket)] == value:
return 1 # don't do anything...
self.handleClientRemoveIdentification( clientSocket )
if self.bVerbose: print( "INF: client %s has idenfied himself as %s" % ( str(id(clientSocket)), value ) )
self.dictClientID[id(clientSocket)] = value
self.handleClientIdentified( clientSocket )
return 1
if command == Versatile.nCommandType_GetClientParam:
#valueFromALMemory = "(a value taken from ALMemory data named: '%s')" % command_parameters[0]
#valueFromALMemory = "toto"
strDataName = command_parameters[0]
try:
clientID = self.dictClientID[id(clientSocket)]
valueFromParams = self.dictClientParams[clientID][strDataName]
except:
if self.bVerbose: print( "DBG: data '%s' isn't in the client params" % (strDataName) )
valueFromParams = None # non existing value
return valueFromParams
if command == Versatile.nCommandType_SetClientParam:
clientID = self.dictClientID[id(clientSocket)]
strDataName = command_parameters[0]
value = command_parameters[1]
if clientID not in self.dictClientParams.keys():
if self.bVerbose: print( "INF: Versatile.handleCommand: creating a client param id for client %s" % clientID )
self.dictClientParams[clientID] = dict()
self.dictClientParams[clientID][strDataName] = value
return 1
if command == Versatile.nCommandType_SubscribeCamera:
if self.bVerbose: print( "DBG: SubscribeCamera: command_parameters: %s" % str(command_parameters) )
self.registerToCamera( clientSocket, command_parameters[0], command_parameters[1], command_parameters[2], 1./command_parameters[3] )
return 1
if command == Versatile.nCommandType_GetBroadcasted:
#valueFromALMemory = "(a value taken from ALMemory data named: '%s')" % command_parameters[0]
#valueFromALMemory = "toto"
strBroadcasterID = command_parameters[0]
nBroadcasterID = int(strBroadcasterID)
if not nBroadcasterID in self.dictBrodcastedImages.keys():
print( "ERR: Versatile.handleCommand: broadcaster %d unknown" % nBroadcasterID )
return 0 # unknown client
return self.dictBrodcastedImages[nBroadcasterID][-1]
print( "ERR: Versatile.handleCommand: Abnormal case: unknown command type: %s" % command )
return None
# handleCommand - end
def handleNewClientArrival( self, client ):
"""
A new client arrived, do what you want in inherited classes...
"""
pass
def handleClientIdentified( self, client ):
"""
A client has identified, do what you want in inherited classes...
"""
pass
def handleClientRemoveIdentification( self, client ):
"""
this client want to stop using this identification, do what you want in inherited classes...
"""
pass
def handleClientLeft( self, client ):
"""
Sad news: client left, do what you want in inherited classes...
"""
pass
def manageClientRequests( self, client ):
self.handleNewClientArrival( client )
try:
while 1:
command, command_parameters = Versatile._waitPacket(client, self.bVerbose)
if command == Versatile.nCommandType_EOC:
client.close()
break
if self.bVerbose: print( "DBG: versatile.manageClientRequests: before handling command..." )
valueToReturn = self.handleCommand( command, command_parameters, client )
self._send( Versatile.nCommandType_Value, None, valueToReturn, client )
except socket.error as err:
print( "ERR: when working with client, received error: %s" % err )
client.close()
self.handleClientLeft(client)
def runServer( self ):
"""
run an infinite server
"""
import threading
self.startThreadSendImage()
self.socket_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while True:
try:
self.socket_server.bind( ('', self.nPort ) )
break
except OSError as err:
print( "ERR: connection error: %s" % str(err) )
print( "ERR: retrying in 2 sec..." )
time.sleep( 2 )
print( "INF: versatile.runServer: server ready for client connection on port %s..." % self.nPort )
self.bMustStop = False
while not self.bMustStop:
try:
self.socket_server.listen(5)
client, address = self.socket_server.accept()
print( "Versatile: client connect from %s" % str(address) )
#self.manageClientRequests(client) # only one at a time !
threading.Thread( target=self.manageClientRequests, args=(client,) ).start() # the ',' after 'client' is important else it's not a tuple
except socket.error as err:
print( "ERR: when working with client, received error: %s" % err )
client.close()
self.socket_server.close()
print( "INF: versatile.runServer: stopped." )
def stopServer( self ):
print( "INF: versatile.stopServer: stopping..." )
self.bMustStop = True # known bug: we need to get out of the listen to be stopped !!! :(
self.disconnect()
try:
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect( ("localhost", self.nPort)) # so we generate a socket to ourself, weird, but works fine.
except Exception as err:
print( "DBG: stopServer: socket already closed? err: %s" % str(err) )
def _reconnect( self, bFailSilently = False ):
try:
print( "INF: Versatile: connecting to %s:%d" % (self.ip, self.nPort) )
self.socket_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket_server.connect((self.ip, self.nPort))
except socket.error:
if not bFailSilently:
raise socket.error( "cannot connect to %s:%d" % (self.ip, self.nPort) )
else:
print( "WRN: can't (re)connect to %s:%d" % (self.ip, self.nPort) )
def isConnect( self ):
pass #TODO
def connect( self, strIP, bPatientMode = True):
"""
bPatientMode: if set, even if server not present at start, the module will wait
"""
self.ip = strIP
self._reconnect(bFailSilently=bPatientMode)
def disconnect( self ):
self.socket_server.shutdown(socket.SHUT_RDWR);
self.socket_server.close()
@staticmethod # why !?!
def _waitPacket( socket_object, bVerbose = False ):
"""
return a decoded packet as a tuple: (nCommand, command_parameters...)
can return a nCommand == EOC: connection closed
"""
p,size = _waitSizedData(socket_object,bVerbose)
if p == None:
return (Versatile.nCommandType_EOC,None)
nCommand = struct.unpack_from("B",p)[0]
offset = 1
if bVerbose: print( "DBG: Versatile._waitPacket: receive command: %d (%s)" % (nCommand,Versatile.commandNumToLibelle(nCommand)) )
commandParameters = []
if Versatile.isCommandRequiringDataName( nCommand ):
strDataName, size = Versatile.VersatileString.fromPacket(p[offset:])
commandParameters.append( strDataName )
offset+=size
if Versatile.isCommandRequiringValue( nCommand ):
value, size = Versatile.VersatileValue.fromPacket(p[offset:])
commandParameters.append( value )
offset+=size
if nCommand == Versatile.nCommandType_SubscribeCamera:
for i in range(4):
value, size = Versatile.VersatileValue.fromPacket(p[offset:])
commandParameters.append( value )
offset+=size
return (nCommand, commandParameters)
@staticmethod
def _waitValue( socket_object ):
"""
return a decoded value as a python object (but encoded in a packet for more security)
"""
#data,size = _waitSizedData(socket_object)
#value, size = Versatile.VersatileValue.fromPacket(data)
nCommand, commandParameters = _waitPacket( socket_object )
assert( nCommand == Versatile.nCommandType_Value )
return value
def _send( self, nNumCommand, strDataName = None, value = None, optionnalClient = None ):
"""
send packet to peer
strDataName: in case of a stm mode
value: a list of value to send
optionnalClient: filed if the process is a server, permits to know the client (at least to print a debug msg)
return:
- False on error, or (nCommand, command_parameters...) in case of returned value from complex command
"""
if self.bVerbose: print( "DBG: Versatile._send: sending command %s (%s), dataname: %s, value: %s" %(nNumCommand,Versatile.commandNumToLibelle(nNumCommand),strDataName, value) )
data = struct.pack('B', nNumCommand)
if nNumCommand == Versatile.nCommandType_SubscribeCamera:
# value is a tuple
for v in value:
print( "DBG: _send: encoding command parameter: %s" % v )
if self.bVerbose: print( "DBG: _send: encoding command parameter: %s" % v )
vv = Versatile.VersatileValue(v)
data += vv.toPacket()
else:
# ca mériterais un petit refactor ca (ce cas particulier est vraiment pas beau)
if strDataName != None:
vs = Versatile.VersatileString(strDataName)
vv = Versatile.VersatileValue( value ) # in any case we can create a value, even if it's None
if Versatile.isCommandRequiringDataName( nNumCommand ):
# we encode the dataname
data += vs.toPacket()
if Versatile.isCommandRequiringValue( nNumCommand ):
# we encode a value
data += vv.toPacket()
if self.bVerbose and 0:
nPrintLen = 64
print( "DBG: Versatile._send: data to send (without encoded len) (first %d bytes):" % nPrintLen )
for i in range(min(nPrintLen, len(data))):
if isinstance(data[i], int):
print( "0x%X " % data[i] ),
else:
print( "s0x%X " % ord(data[i]) ),
print("" ) # \n
data = struct.pack('<I', len(data)) + data
if self.bVerbose: print( "DBG: Versatile._send: sending a packet of total size: %s" % str(len(data)) )
socket_object = optionnalClient
self.bCustomClientSocket = True
if socket_object == None:
self.bCustomClientSocket = False
socket_object = self.socket_server
try:
socket_object.sendall( data )
except socket.error as err:
if self.bCustomClientSocket:
print( "WRN: Versatile.send: got socket error, client disappear? err: %s" % str(err) )
return False
print( "WRN: Versatile.send: got socket error, serveur out, try to reconnect, err: %s" % str(err) )
# ne fonctionne pas sous windows?
self._reconnect(bFailSilently = True)
socket_object = self.socket_server
try:
socket_object.sendall( data )
except socket.error as err:
print( "DBG: Versatile.send: socket disconnected, skipping this message, will try to reconnect next time..., err: %s" % str(err) )
return False
if self.bVerbose: print( "DBG: command sent..." )
if nNumCommand == Versatile.nCommandType_Value:
# a returned value never wait for ack
return [1,[True]]
if self.bVerbose: print( "DBG: waiting for an answer..." )
retVal = Versatile._waitPacket(socket_object)
if self.bVerbose: print( "DBG: waiting for an answer: received..." )
return retVal
def waitPacket( self ):
"""
wait for some answer
"""
retVal = Versatile._waitPacket(self.socket_server)
if self.bVerbose: print( "DBG: waitPacket: returning: %s" % str(retVal ) )
return retVal
def isRunning( self ):
try:
retVal = self._send( Versatile.nCommandType_Ping )
if self.bVerbose: print( "DBG: Versatile: isRunning: received: %s" % str(retVal) )
return retVal != False and retVal[1][0] == 1
except Exception as err:
if self.bVerbose: print( "DBG: Versatile: isRunning: received this err: %s" % str(err) )
return False
############### accessor to each command type - use them in your program
def get( self, strDataName ):
"""
return a pair: sucess,value
"""
retVal = self._send( Versatile.nCommandType_Get, strDataName )
if retVal == False:
return [False,None]
return [True, retVal[1][0] ]
def set( self, strDataName, value ):
retVal = self._send( Versatile.nCommandType_Set, strDataName, value )
#~ print( "DBG: Versatile.set: retVal: %s" % str(retVal) )
return retVal != False and retVal[1][0] == 1
def sendValue( self, value ):
retVal = self._send( Versatile.nCommandType_Value, None, value )
return retVal != False and retVal[1][0] == 1
def sendValueGeneratingResults( self, value ):
"""
when sending a value generating a result, like an image and its analyse...
"""
retVal = self._send( Versatile.nCommandType_Value, None, value )
if self.bVerbose: print( "DBG: sendValueGeneratingResults: retVal: %s" % str(retVal) )
if retVal == False or retVal[1][0] == 0:
return False # error in sending values
return self.waitPacket()
def createClientID(self):
retVal = self._send( Versatile.nCommandType_CreateClientID )
if retVal == False:
return [False,None]
return [True, retVal[1][0] ]
def setClientID(self, clientID):
retVal = self._send( Versatile.nCommandType_SetClientID, None, str(clientID) )
return retVal != False and retVal[1][0] == 1
def setClientParam(self, strParamName, value ):
retVal = self._send( Versatile.nCommandType_SetClientParam, str(strParamName), value )
return retVal != False and retVal[1][0] == 1
def getClientParam(self, strParamName, value ):
retVal = self._send( Versatile.nCommandType_GetClientParam, str(strParamName) )
if retVal == False:
return [False,None]
return [True, retVal[1][0] ]
def subscribeCamera(self, nCameraIndex, nWantedResolution, nImageFormat, nFps ):
retVal = self._send( Versatile.nCommandType_SubscribeCamera, None, (nCameraIndex,nWantedResolution,nImageFormat, nFps ) )
return retVal != False and retVal[1][0] == 1
def getImage( self ):
"""
get an Image from a previously subscribed server
return False on error
"""
retVal = self.waitPacket()
print("DBG: Versatile.getImage: retVal: %s" % str(retVal) )
command, param = retVal
if self.bVerbose: print("DBG: run_camera_subscriber: param: %s" % str(param))
if param != None and len(param)> 0:
data = param[0][0]
if 1: # data.nType == Versatile.VersatileValue.nValueType_Image: # and command = Versatile.nCommandType_Value
if self.bVerbose: print( "DBG: Versatile.getImage: image received" )
im = data.convertToCvImage()
if self.bVerbose: print( "DBG: Versatile.getImage: converted: shape: %s" % str(im.shape) )
return im
return False
# getImage - end
def getBrodcastedImage( self, strBroadcasterID ):
"""
get a (redundant) pair [Versatile Image, opencv image] from a broadcaster
return False on error
"""
retVal = self._send( Versatile.nCommandType_GetBroadcasted, str(strBroadcasterID) )
if self.bVerbose: print("DBG: getImage: retVal: %s" % str(retVal) )
if retVal == None:
return False
command, param = retVal
if self.bVerbose: print("DBG: run_camera_subscriber: param: %s" % str(param))
if param != None and len(param)> 0:
data = param[0][0]
if 1: # data.nType == Versatile.VersatileValue.nValueType_Image: # and command = Versatile.nCommandType_Value
if self.bVerbose: print( "DBG: image received" )
im = data.convertToCvImage()
if self.bVerbose: print( "DBG: converted: shape: %s" % str(im.shape) )
return data,im
return False
# getBrodcastedImage - end
# class Versatile - end
def connectAndSend( strIP, strDataName, value = None ):
v = Versatile()
v.connect( strIP )
if value == None:
retVal = v.get( strDataName )
#retVal = v.get( strDataName ) #test 2 calls in a raw
else:
retVal = v.set( strDataName, value )
print( "connectAndSend: finished with value: %s" % str(retVal) )
def run_server(nPort=10001):
print( "INF: versatile: starting server..." )
v = Versatile(nPort=nPort)
v.runServer()
def run_camera_subscriber( strServerIP, nPort=10001, bVerbose=False ):
"""
duplicate from test_get_image
"""
v = Versatile(nPort=nPort)
v.connect( strServerIP )
v.subscribeCamera( 0,Versatile.VersatileImage.WantedResolution.VGA,Versatile.VersatileImage.Format.PNG, 30 )
nCptImage = 0
nCptImageTotal = 0
timeBegin = time.time()
while 1:
im = v.getImage()
if not im is False:
cv2.imshow("received", im)
if bVerbose: print( "DBG: drawed" )
nKey = cv2.waitKey(1) & 0xFF
bEnd = ( nKey == ord( 'q' ) or nKey == ord( 'Q' ) )
if bEnd:
break
nCptImage +=1
nCptImageTotal += 1
if nCptImage > 50:
print( "INF: fps: %5.1f (total image: %d)" % (nCptImage/(time.time()-timeBegin), nCptImageTotal) )
nCptImage = 0
timeBegin = time.time()
else:
print( "WRN: reception error...")
# run_camera_subscriber - end
def autoTest():
"""
autotest as a client
"""
Versatile.VersatileImage.autoTest()
# system( "python %s start_server" % sys.argv )
pass # a bit complicated
if( __name__ == "__main__" ):
#Versatile.VersatileImage_autoTest()
def print_syntax():
print( "" )
print( "syntax: python %s remote_ip variable_name [value]" % sys.argv[0])
print( "syntax: to start a server: python %s start_server" % sys.argv[0] )
print( "syntax: to subscribe images from a server: python %s remote_ip subscribe_camera" % sys.argv[0] )
print( "" )
print( "To start a local test:\nshell 1: python %s start_server\nshell 2: python %s localhost myTime 12h34 & python %s localhost myTime" % (sys.argv[0],sys.argv[0],sys.argv[0]))
exit( 0 )
nPort = 10001
# eat options
i = 1
while i < len(sys.argv):
if sys.argv[i][0] == '-':
if sys.argv[i][1] == 'p':
nPort = int(sys.argv[i+1])
print( "INF: changing to port %d" % (nPort) )
del sys.argv[i]
del sys.argv[i] # was the i+1
else:
i += 1
if len(sys.argv) < 2:
print_syntax()
strIP = sys.argv[1]
if strIP == "start_server":
run_server(nPort=nPort)
exit(1)
if len(sys.argv) < 3:
print_syntax()
strDataName = sys.argv[2]
if strDataName == "subscribe_camera": # TODO: en cours de test: lancer un serveur d'un coté et subscribe_camera de l'autre
run_camera_subscriber(strIP,nPort)
exit(1)
strValue = None
if len(sys.argv)>3:
strValue = sys.argv[3]
retVal = connectAndSend( strIP, strDataName, strValue )
print( "return:\n%s" % str(retVal) )
| alexandre-mazel/electronoos | scripts/versatile/versatile.py | versatile.py | py | 54,037 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "sys.version_info",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "sys.version_info",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "struct.pack",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
15554629273 | from django.contrib import admin
from .models import Faq
class FaqAdmin(admin.ModelAdmin):
list_display = (
'title',
'writer',
'hits',
'registered_date',
)
search_fields = ('title', 'content', 'writer__user_id',)
admin.site.register(Faq, FaqAdmin)
| SangjunDev/ROHDE-SCHWARZ_Local | faq/admin.py | admin.py | py | 302 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 13,
"usage_type": "call"
},... |
1911947262 | from typing import List
from core.point import POINT_AT_INFINITY, Point
from utils.int_operations import is_prime
from utils.mod_operations import divide, is_square, square_root
class EllipticCurve:
    """Elliptic curve y^2 = x^3 + a*x + b over the prime field Z_p."""

    def __init__(self, a, b, p) -> None:
        # NOTE(review): is_non_singular() is never enforced; a singular curve
        # (4a^3 + 27b^2 == 0) would silently yield an invalid group law.
        self.__a = a
        self.__b = b
        self.__p = p
        self.__points = self.get_points()

    def is_non_singular(self):
        """Return True when the discriminant term 4a^3 + 27b^2 is non-zero."""
        return 4 * self.__a ** 3 + 27 * self.__b ** 2 != 0

    def get_points(self) -> List[Point]:
        """Enumerate every affine point on the curve (point at infinity excluded)."""
        points = []
        for x in range(0, self.__p):
            x_part = x ** 3 + self.__a * x + self.__b
            if not is_square(x_part, self.__p):
                continue
            # Fixed: local variable instead of the original `global y`, which
            # leaked loop state into the module namespace.
            y = square_root(x_part, self.__p)
            p1 = Point(x, y)
            if p1.get_y() != 0:
                # Every y != 0 has a mirror point (x, p - y).
                points.append(Point(x, self.__p - y))
            points.append(p1)
        return points

    def is_on_curve(self, p: Point):
        """Return True when p satisfies the curve equation modulo __p."""
        x, y = p.get_x(), p.get_y()
        return (y ** 2) % self.__p == (x ** 3 + self.__a * x + self.__b) % self.__p

    def add(self, p1: Point, p2: Point):
        """Group addition of two points; handles doubling and the identity."""
        if self.is_on_curve(p1) and self.is_on_curve(p2):
            x1, y1 = p1.get_x(), p1.get_y()
            x2, y2 = p2.get_x(), p2.get_y()
            # Vertical chord/tangent: the sum is the point at infinity.
            if (p1.equals_to(p2) and y1 == 0) or (not p1.equals_to(p2) and x1 == x2):
                return POINT_AT_INFINITY
            # Fixed: local `slope` instead of the original `global slope`.
            if p1.equals_to(p2):
                slope = divide(3 * x1 ** 2 + self.__a, 2 * y1, self.__p)
            else:
                slope = divide(y2 - y1, x2 - x1, self.__p)
            x_sum = (slope ** 2 - x1 - x2) % self.__p
            y_sum = (slope * (x1 - x_sum) - y1) % self.__p
            return Point(x_sum, y_sum)
        if p1.equals_to(POINT_AT_INFINITY) and self.is_on_curve(p2):
            return p2
        if p2.equals_to(POINT_AT_INFINITY) and self.is_on_curve(p1):
            return p1
        return POINT_AT_INFINITY

    def multiply(self, p: Point, k: int):
        """Scalar multiplication k*p by repeated addition (O(k) additions)."""
        point_prod = Point(p.get_x(), p.get_y())
        if self.is_on_curve(p) and k > 0:
            for _ in range(1, k):
                point_prod = point_prod.assign(self.add(point_prod, p))
            return point_prod
        return POINT_AT_INFINITY

    def get_times_table(self, p: Point):
        """Return the multiplication table of p up to the point's order."""
        k = 1
        table = f'P({p.get_x()}, {p.get_y()}):\n'
        while not self.multiply(p, k).equals_to(POINT_AT_INFINITY):
            point_prod = self.multiply(p, k)
            table += f' - {k if k > 1 else ""}P = {point_prod.to_string()}\n'
            k += 1
        table += f' - {k}P = {POINT_AT_INFINITY.to_string()}\n'
        return table

    def get_times_tables(self):
        """Return the multiplication tables of every point on the curve."""
        tables = f'Bảng cửu chương của {self.to_string()}'
        for i, p in enumerate(self.__points):
            k = 1
            # Fixed: terminate the header with '\n' so the first entry is not
            # fused onto it (matches get_times_table's formatting).
            table = f'P_{i}({p.get_x()}, {p.get_y()})\n'
            while not self.multiply(p, k).equals_to(POINT_AT_INFINITY):
                point_prod = self.multiply(p, k)
                table += f' - {k if k > 1 else ""}P_{i} = {point_prod.to_string()}\n'
                k += 1
            table += f' - {k}P_{i} = {POINT_AT_INFINITY.to_string()}\n'
            tables += f'{table}\n'
        return tables

    def to_string(self):
        """Human-readable curve equation, coefficients reduced to signed form."""
        return f'y^2 = x^3 {"+" if self.__a > 0 else "-"} {abs(self.__a) if abs(self.__a) != 1 else ""}x {"+" if self.__b > 0 else "-"} {abs(self.__b)} (mod {self.__p})'

    def count_points(self):
        """Number of points on the curve, including the point at infinity."""
        return len(self.__points) + 1
def is_prime_points_count(self):
return is_prime(self.count_points()) | duycao2001/elliptic_curve | elliptic-curve-python/core/elliptic_curve.py | elliptic_curve.py | py | 3,694 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.mod_operations.is_square",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "utils.mod_operations.square_root",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "core.point.Point",
"line_number": 28,
"usage_type": "call"
},
{
"... |
27663467805 | # coding: utf-8
from flask import url_for
from wtforms.widgets import html_params, HTMLString
from cgi import escape
from wtforms.compat import text_type
class VerifyCode(object):
    """wtforms widget rendering a captcha text input plus a click-to-refresh image."""

    html_params = staticmethod(html_params)

    def __call__(self, field, **kwargs):
        # Hidden fields render as a bare hidden <input>, no captcha image.
        if field.hidden:  # fixed idiom: was `== True`
            return HTMLString('<input %s>' % self.html_params(
                id=field.id,
                type='hidden',
                name=field.name,
                value=field._value(),
            ))
        # Visible captcha: text input + refreshable image, built via join
        # instead of repeated string concatenation.
        parts = ['<div class="input-group input-group-lg">']
        parts.append('<input %s>' % self.html_params(
            id=field.id,
            type='text',
            name=field.name,
            value=field._value(),
            maxlength=4,
            **kwargs
        ))
        # Clicking the image reloads it with a cache-busting random query arg.
        parts.append('<span class="input-group-addon" style="padding:0px;"><img %s></span>' % self.html_params(
            id='%s_img' % field.id,
            src=url_for('verify_code', key=field.key),
            data_src=url_for('verify_code', key=field.key),
            style='cursor:pointer;',
            onclick="$(this).attr('src', '" + url_for('verify_code', key=field.key) + "&t=' + Math.random());"
        ))
        parts.append('</div>')
        return HTMLString(''.join(parts))
class UEditor(object):
    """wtforms widget that renders a UMeditor rich-text editor."""

    html_params = staticmethod(html_params)

    def __call__(self, field, **kwargs):
        # UMeditor expects a <script type="text/plain"> container which it
        # upgrades in place on the client side.
        kwargs.setdefault('id', field.id)
        kwargs.setdefault('type', 'text/plain')
        kwargs.setdefault('style', 'width:99%;height:360px;')
        # Clear any CSS class so the editor's own chrome is not disturbed.
        kwargs['class'] = ''
return HTMLString('<script %s>%s</script><script>var um = UM.getEditor("%s");</script>' % (html_params(name=field.name, **kwargs), text_type(field._value()), field.name)) | endsh/haoku-open | simin/simin/web/forms/widgets.py | widgets.py | py | 1,490 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "wtforms.widgets.html_params",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask.url_for",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "flask.url_for... |
14656769144 | import warnings
warnings.filterwarnings("ignore")
from jax_sandbox.common import *
from jax_sandbox.imitation import *
from jax_sandbox.actor_critic import *
from jax_sandbox.policy_gradient import *
from jax_sandbox.value_based_methods import *
import hydra
import envs.dmc as dmc
from utils import *
def make_env(env_name, seed):
    """Build a seeded environment: known DMC suite tasks get a dmc env,
    anything else falls back to a gym env."""
    dmc_tasks = (
        'walker_walk', 'cheetah_run', 'humanoid_walk', 'finger_turn_hard',
        'cartpole_swingup', 'hopper_hop', 'quadruped_walk', 'reacher_hard',
    )
    if env_name in dmc_tasks:
        return dmc.make(env_name, seed=seed)
    return make_gym_env(env_name, seed)
def get_observation_action_spec(env):
    """Return (observation shape, action dimensionality) for a gym- or
    dm_env-style environment."""
    if not hasattr(env, 'observation_space'):
        # dm_env-style API. NOTE(review): this branch returns the raw action
        # *shape* while the gym branch returns an int — confirm callers expect
        # this asymmetry.
        return env.observation_spec().shape, env.action_spec().shape
    obs_shape = env.observation_space.shape
    if isinstance(env.action_space, gym.spaces.Box):
        act_dim = env.action_space.shape[0]
    else:
        act_dim = env.action_space.n
    return obs_shape, act_dim
OFFLINE_ALGOS = ['cql', 'td3_bc', 'milo']
class Workspace:
    """Builds train/eval envs, derives config fields, and instantiates the
    learner selected by cfg.alg."""

    def __init__(self, cfg):
        self.cfg = cfg
        self.setup()
        print('done setting up')
        # Dispatch table replaces the original if/elif ladder; lambdas defer
        # construction until the chosen algorithm is known.
        algos = {
            'bc': lambda: bc.BC(cfg),
            'gail': lambda: gail.GAIL(cfg),
            'reinforce': lambda: reinforce.REINFORCE(cfg),
            'ddpg': lambda: ddpg.DDPG(cfg),
            'sac': lambda: sac.SAC(cfg),
            'dqn': lambda: dqn.DQN(cfg),
        }
        try:
            self.learner = algos[cfg.alg]()
        except KeyError:
            # Same exception type/message as before for unknown algorithms.
            raise ValueError('RL algorithm not implemented yet.')

    def setup(self):
        """Create envs and fill in unknown cfg values (shapes, input kind)."""
        self.train_env = make_env(self.cfg.task, self.cfg.seed)
        self.eval_env = make_env(self.cfg.task, self.cfg.seed)
        self.cfg.obs_shape, self.cfg.action_shape = get_observation_action_spec(self.train_env)
        # NOTE(review): assigning is_discrete(...) to a field named
        # `continuous` looks inverted — confirm the intended semantics.
        self.cfg.continuous = is_discrete(self.cfg.task)
        # Rank-3 observations are treated as images — presumably (H, W, C).
        self.cfg.img_input = len(self.cfg.obs_shape) == 3
# dataset/dataloader
@hydra.main(config_path="cfgs", config_name="config")
def main(cfg):
    # Entry point: hydra populates cfg from cfgs/config.yaml plus CLI overrides.
    # Constructing the Workspace performs all setup; training loop presumably
    # lives elsewhere / is not invoked here — TODO confirm.
    ws = Workspace(cfg)

if __name__ == '__main__':
    main()
{
"api_name": "warnings.filterwarnings",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "envs.dmc.make",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "envs.dmc",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "hydra.main",
"line_... |
30500422685 | import os
import click
import numpy as np
import pandas as pd
import os.path as op
import nibabel as nib
from tqdm import tqdm
from glob import glob
from joblib import Parallel, delayed
from nilearn import masking, image
from nistats.design_matrix import make_first_level_design_matrix
from nistats.first_level_model import run_glm
from nistats.contrasts import compute_contrast, expression_to_contrast_vector
from nistats.second_level_model import SecondLevelModel
# Per-task GLM contrast specifications.
# 'contrast' entries are contrast expressions over the events' trial_type
# levels; 'name' holds the matching output-file labels (same order).
# 'column' (gstroop only) selects an alternative events column as trial_type.
TASK_INFO = dict(
    anticipation=dict(
        contrast=['img_negative - img_neutral', 'cue_negative - cue_neutral'],
        name=['imgnegGTimgneu', 'cuenegGTcuepos']
    ),
    workingmemory=dict(
        contrast=['active_change + active_nochange - 2*passive', 'active_change - active_nochange'],
        name=['activeGTpassive', 'changeGTnochange']
    ),
    gstroop=dict(
        column='response_accuracy',
        contrast=['incorrect - correct'],
        name=['incorrectGTcorrect']
    ),
    faces=dict(
        contrast=['joy + anger + pride + contempt - 4*neutral'],
        name=['emoexpGTneutral']
    ),
    emomatching=dict(
        contrast=['emotion - control'],
        name=['emotionGTcontrol']
    ),
    stopsignal=dict(  # only PIOP2
        contrast=['unsuccesful_stop - go', 'succesful_stop - go', 'unsuccesful_stop - succesful_stop'],
        name=['failedstopGTgo', 'succesfulstopGTgo', 'unsuccesfulstopGTsuccesfulstop']
    )
)
def fit_firstlevel(bids_dir, func, task, space, out_dir):
    """Fit a first-level GLM for one preprocessed run and write beta/varbeta
    maps for each contrast defined in TASK_INFO[task]."""
    sub_base = op.basename(func).split('_')[0]
    # fMRIPrep confounds: cosine drift terms + 6 rigid-body motion parameters.
    conf = func.split('space')[0] + 'desc-confounds_regressors.tsv'
    conf = pd.read_csv(conf, sep='\t')
    cols = [col for col in conf.columns if 'cosine' in col]
    cols += ['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z']
    conf = conf.loc[:, cols]
    events = op.join(bids_dir, sub_base, 'func', op.basename(func).split('space')[0] + 'events.tsv')
    events = pd.read_csv(events, sep='\t')
    if 'column' in TASK_INFO[task].keys():
        # Use an alternative events column (e.g. response_accuracy) as trial_type.
        events = events.drop('trial_type', axis=1)
        events['trial_type'] = events.loc[:, TASK_INFO[task]['column']]
        n_correct = np.sum(events['trial_type'] == 'correct')
        prop_correct = n_correct / events.shape[0]
        if prop_correct < 0.2:
            # Presumably a flipped response mapping: swap correct/incorrect.
            # NOTE(review): 0.2 is a heuristic threshold — confirm.
            print(f"{func}: {prop_correct}")
            events['trial_type'] = events['trial_type'].replace({'correct': 'incorrect', 'miss': 'miss', 'incorrect': 'correct'})
    if 'fs' in space:
        # Surface (gifti) data: TR / n-volumes come from the matching
        # volumetric file's header; samples are stacked across darrays.
        func_vol = func.split('space')[0] + 'space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'
        hdr = nib.load(func_vol).header
        Y = np.vstack([arr.data for arr in nib.load(func).darrays])
    else:
        mask = func.replace('preproc_bold', 'brain_mask')
        Y = masking.apply_mask(func, mask)
        hdr = nib.load(func).header
    tr, nvol = hdr['pixdim'][4], hdr['dim'][4]
    # Frame times at mid-TR (0.5*tr offset).
    frame_times = np.linspace(0.5 * tr, tr * nvol, num=nvol, endpoint=False)
    dm = make_first_level_design_matrix(
        frame_times=frame_times,
        events=events,
        hrf_model='glover',
        drift_model=None,  # drift already covered by the cosine confounds
        add_regs=conf.values,
        add_reg_names=conf.columns.tolist()
    )
    # Demean the data; the design matrix carries its own drift/intercept terms.
    Y -= Y.mean(axis=0)
    X = dm.to_numpy()
    labels, results = run_glm(Y=Y, X=X, noise_model='ar1')
    sub_out = op.join(out_dir, sub_base, 'firstlevel')
    if not op.isdir(sub_out):
        os.makedirs(sub_out)
    for contrast, name in zip(TASK_INFO[task]['contrast'], TASK_INFO[task]['name']):
        # Tokenize the contrast expression and skip it when any referenced
        # condition is absent from this run's events.
        items = contrast.replace('-', '').replace('+', '').replace('*', '').split(' ')
        items = [i for i in items if i]
        trial_types = events['trial_type'].unique().tolist()
        for item in items:
            item = ''.join([i for i in item if not i.isdigit()])
            if item not in trial_types:
                break  # condition not present in event file
        else:  # runs only when no condition was missing (for-else)
            con_val = expression_to_contrast_vector(contrast, dm.columns)
            con = compute_contrast(labels, results, con_val)
            #stats = flm.compute_contrast(contrast, output_type='all')
            f_base = op.basename(func).split('.')[0]
            f_base += f"_contrast-{name}"
            if 'fs' in space:
                # Surface results stay as raw arrays (.npy), no unmasking.
                f_out = op.join(sub_out, f_base + '_beta.npy')
                np.save(f_out, con.effect_size())
                np.save(f_out.replace('beta', 'varbeta'), con.effect_variance())
            else:
                f_out = op.join(sub_out, f_base + '_beta.nii.gz')
                masking.unmask(con.effect_size(), mask).to_filename(f_out)
                masking.unmask(con.effect_variance(), mask).to_filename(f_out.replace('beta', 'varbeta'))
@click.command()
@click.argument('bids_dir', type=click.Path())
@click.argument('out_dir', required=False, type=click.Path())
@click.argument('level', default='participant')
@click.option('--task', default='workingmemory')
@click.option('--space', default='MNI152NLin2009cAsym')
@click.option('--smoothing', default=None, type=click.FLOAT)
@click.option('--n_jobs', default=1, type=int)
def main(bids_dir, out_dir, level, task, space, smoothing, n_jobs):
    """ BIDS-app format. """
    if out_dir is None:
        out_dir = op.join(bids_dir, 'derivatives', 'task_fmri')
    if level == 'participant':
        # First level: fit one GLM per subject/run, in parallel over runs.
        ext = 'func.gii' if 'fs' in space else 'desc-preproc_bold.nii.gz'
        fprep_dir = op.join(bids_dir, 'derivatives', 'fmriprep')
        funcs = sorted(glob(op.join(
            fprep_dir, 'sub-*', 'func', f'*task-{task}_*_space-{space}*{ext}'
        )))
        print(op.join(
            fprep_dir, 'sub-*', 'func', f'*task-{task}_*_space-{space}*{ext}'
        ))
        _ = Parallel(n_jobs=n_jobs)(delayed(fit_firstlevel)(bids_dir, f, task, space, out_dir) for f in tqdm(funcs))
    else:
        # Group level: one-sample t-test (intercept-only design) over the
        # subjects' first-level betas, per contrast (and per hemisphere when
        # working on the surface).
        for cname in TASK_INFO[task]['name']:
            ext = 'npy' if 'fs' in space else 'nii.gz'
            to_iter = ['_hemi-L', '_hemi-R'] if 'fs' in space else ['']
            for s in to_iter:
                betas = sorted(glob(op.join(out_dir, 'sub-*', 'firstlevel', f'*task-{task}_*{s}_contrast-{cname}_beta.{ext}')))
                if not betas:
                    print(f"WARNING: did not find betas for contrast {cname}!")
                    continue
                dm = pd.DataFrame(np.ones(len(betas)), columns=['intercept'])
                if 'fs' not in space:
                    if smoothing is not None:
                        betas = [image.smooth_img(b, smoothing) for b in betas]
                    # Implicit brain mask: voxels non-zero in the mean image.
                    mean_img = image.mean_img(betas)
                    mask = (mean_img.get_fdata() != 0).astype(int)
                    mask = nib.Nifti1Image(mask, affine=mean_img.affine)
                    Y = masking.apply_mask(betas, mask)
                else:
                    Y = np.vstack([np.load(b) for b in betas])
                labels, results = run_glm(Y, dm.values, noise_model='ols', n_jobs=n_jobs)
                group_result = compute_contrast(labels, results, [1], contrast_type='t')
                if 'fs' in space:
                    f_out = op.join(out_dir, f'task-{task}_contrast-{cname}{s}_desc-grouplevel_zscore.npy')
                    np.save(f_out, group_result.z_score())
                else:
                    f_out = op.join(out_dir, f'task-{task}_contrast-{cname}_desc-grouplevel_zscore.nii.gz')
                    to_save = masking.unmask(group_result.z_score(), mask)
                    to_save.to_filename(f_out)

if __name__ == '__main__':
    main()
| NILAB-UvA/AOMIC-common-scripts | misc_qc/run_task_fmri_models.py | run_task_fmri_models.py | py | 7,579 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "os.path.basename",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_num... |
36192451748 | import logging
from time import sleep
from collections import namedtuple
from selenium import webdriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import (
NoSuchElementException,
ElementClickInterceptedException,
WebDriverException,
)
from dateparser import parse as parse_date
# Immutable record describing one active parking session as scraped from the
# PayByPhone web UI (see Bot.parse_parking_session).
ParkingSession = namedtuple(
    "ParkingSession", "LicensePlate LocationNumber ExpiryDate RateOption"
)
class Bot:
"""
Manages a WebDriver to perform various actions
"""
LOGIN_URL = "https://m2.paybyphone.fr/login"
PARKING_URL = "https://m2.paybyphone.fr/parking"
PARK_URL = "https://m2.paybyphone.fr/parking/start/location"
DURATION_URL = "https://m2.paybyphone.fr/parking/start/duration"
CONFIRM_URL = "https://m2.paybyphone.fr/parking/start/confirm"
GDPR_BUTTON_XPATH = "/html/body/div[3]/md-dialog/section/footer/button"
PARKING_SESSIONS_XPATH = "/html/body/div/section/md-content/div[4]"
CONFIRM_BUTTON_XPATH = (
"/html/body/div/section/pbp-parking-confirm/md-content/button"
)
def __init__(self, driver_name: str):
driver_name = driver_name.casefold()
if driver_name == "phantomjs":
# --disk-cache=true allows to keep a cache
self.driver = webdriver.PhantomJS(service_args=["--disk-cache=true"])
elif driver_name in [
"chrome",
"chrome-headless",
"chromium",
"chromium-headless",
]:
# TODO: find option to use custom profile with cache
chrome_options = ChromeOptions()
chrome_options.add_argument("--disable-gpu") # most important line
chrome_options.add_argument("--disable-extensions")
if driver_name.endswith("-headless"):
chrome_options.add_argument("--headless")
self.driver = webdriver.Chrome(chrome_options=chrome_options)
elif driver_name in ["firefox", "firefox-headless"]:
# not working, disable gpu?
options = FirefoxOptions()
if driver_name.endswith("-headless"):
options.add_argument("-headless")
self.driver = webdriver.Firefox(firefox_options=options)
else:
raise Exception("Driver unknown")
    def send_keys(self, *args):
        """
        Wrapper to send keys to the active (focused) element.
        """
        # NOTE(review): switch_to_active_element() is the deprecated
        # pre-Selenium-4 spelling of driver.switch_to.active_element — confirm
        # the pinned selenium version before upgrading.
        self.driver.switch_to_active_element().send_keys(*args)
def get_el(self, xpath, sleepTime=1, attempts=10):
"""
"""
for _ in range(attempts):
try:
el = self.driver.find_element_by_xpath(xpath)
except NoSuchElementException:
sleep(sleepTime)
else:
return el
raise TimeoutError
    def connect(self, login, pwd):
        """Log in to the PayByPhone web UI.

        Parameters
        ----
        login : str
        pwd : str

        Returns
        -------
        success : bool
        """
        # TODO: handle wrong password
        self.driver.get(self.LOGIN_URL)
        # Tab into the login form fields (the page exposes no stable input
        # focus, so the sequence below is position-dependent).
        self.send_keys(Keys.TAB)
        self.send_keys(Keys.TAB)
        self.send_keys(login)
        self.send_keys(Keys.TAB)
        self.send_keys(pwd)
        self.send_keys(Keys.ENTER)
        gdpr = self.get_el(self.GDPR_BUTTON_XPATH)
        # Retry until the GDPR dialog becomes clickable (page still settling).
        while True:
            try:
                gdpr.click()
            except (ElementClickInterceptedException, WebDriverException):
                # ElementClickInterceptedException for Firefox
                # WebDriverException for Chromium
                sleep(1)
            else:
                break
        # dismiss the cookies banner that causes problems for payment
        button = next(
            b
            for b in self.driver.find_elements_by_tag_name("button")
            if b.text == "DISMISS"
        )
        button.click()
        return True
def get_parking_sessions(self):
"""
List of current parking sessions
"""
self.driver.get(self.PARKING_URL)
parking_sessions = self.get_el(self.PARKING_SESSIONS_XPATH)
sessions = parking_sessions.find_elements_by_class_name("pbp-parking-session")
return list(map(self.parse_parking_session, sessions))
def pay(
self,
plate: str,
location: str,
rate: str,
duration: int,
check_cost: str or None = None,
):
"""
Parameters
----------
plate : str
location : str
rate : str
"RES", "VIS", "PRO-SAD" or other
duration : int or str
minutes if VIS, else days
check_cost : None or str
if string, will check that the cost is correct
Returns
-------
success : bool
"""
self.driver.get(self.PARK_URL)
# There are two kinds of menus
# TODO: factor code
if self.driver.find_elements_by_class_name("option-label"):
selected, *choices = [
e.get_attribute("innerHTML")
for e in self.driver.find_elements_by_class_name("option-label")
]
idx_selected = choices.index(selected)
try:
idx_target = choices.index(plate)
except ValueError:
logging.error("plate not found")
return False
delta = idx_target - idx_selected
self.send_keys(Keys.TAB)
self.send_keys(Keys.TAB)
if delta:
self.send_keys(Keys.SPACE)
sleep(0.5)
for i in range(-delta):
self.send_keys(Keys.UP)
for i in range(delta):
self.send_keys(Keys.DOWN)
sleep(0.5)
self.send_keys(Keys.SPACE)
sleep(0.5)
self.send_keys(Keys.TAB)
sleep(0.5)
self.send_keys(location)
self.send_keys(Keys.ENTER)
else:
self.send_keys(Keys.TAB)
self.send_keys(location)
self.send_keys(Keys.ENTER)
while not self.driver.find_elements_by_class_name("option-label"):
sleep(1)
selected, *choices = [
e.get_attribute("innerHTML")
for e in self.driver.find_elements_by_class_name("option-label")
]
idx_selected = choices.index(selected)
try:
idx_target = choices.index(plate)
except ValueError:
logging.error("plate not found")
return False
delta = idx_target - idx_selected
self.send_keys(Keys.SHIFT + Keys.TAB)
sleep(0.5)
if delta:
self.send_keys(Keys.SPACE)
sleep(0.5)
for i in range(-delta):
self.send_keys(Keys.UP)
for i in range(delta):
self.send_keys(Keys.DOWN)
sleep(0.5)
self.send_keys(Keys.SPACE)
self.driver.find_element_by_xpath(
"/html/body/div/section/md-content/form/button"
).click()
while not self.driver.current_url == self.DURATION_URL:
sleep(1)
for _ in range(10):
try:
menu = self.driver.find_element_by_tag_name("md-select")
menu.click()
except Exception:
sleep(1)
else:
break
choices = [
e.get_attribute("innerText")
for e in self.driver.find_elements_by_class_name("option-label")
]
assert len(choices) == len(set(choices)), "A zone is probably already selected"
choices = [choice.split("(")[1][:-1] for choice in choices]
try:
idx_target = choices.index(rate)
except ValueError:
logging.error("rate not found")
return False
sleep(0.5)
for _ in range(idx_target):
self.send_keys(Keys.DOWN)
sleep(0.5)
sleep(0.5)
self.send_keys(Keys.SPACE)
sleep(0.5)
self.send_keys(Keys.TAB)
self.send_keys(str(duration))
self.send_keys(Keys.ENTER)
sleep(1)
while not self.driver.current_url == self.CONFIRM_URL:
sleep(1)
cost = self.driver.find_element_by_class_name("total-cost").text
if check_cost is not None and cost != check_cost:
logging.warning(
"cost %s didn't match forecasted cost %s, transaction aborted",
cost,
check_cost,
)
logging.info("confimed purchase for %s", cost)
button = self.driver.find_element_by_xpath(self.CONFIRM_BUTTON_XPATH)
button.click()
while (
not self.driver.find_element_by_class_name("content-title").text
== "You've paid!"
):
sleep(0.5)
return cost
    def quit(self):
        """Shut down the underlying WebDriver and close the browser."""
        self.driver.quit()
    @staticmethod
    def parse_parking_session(el):
        # Parse one session DOM element into a ParkingSession tuple; the
        # expiry date string is converted with dateparser.
        try:
            rate = el.find_element_by_class_name("rate-option-details").text
        except NoSuchElementException:
            # Some sessions carry no rate option.
            rate = "none"
        return ParkingSession(
            LicensePlate=el.find_element_by_class_name("license-plate").text,
            LocationNumber=el.find_element_by_class_name("location-number").text,
            ExpiryDate=parse_date(
                el.find_element_by_class_name("expiry-date")
                .find_element_by_tag_name("strong")
                .text
            ),
            RateOption=rate,
| louisabraham/paybybot | paybybot/bot.py | bot.py | py | 9,890 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.PhantomJS",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 46,
"usage_type": "name"
},
{
"api_name":... |
26162270266 | from django.urls import path
from .views import *
app_name = 'pagecrud'

# CRUD routes for catalogs, repositories and journals; delete/update routes
# carry the target row's primary key in the URL.
urlpatterns = [
    path('index/', index, name='index'),
    path('catalog/', CatalogView.as_view(), name='catalogcrud'),
    path('catalogDelete/<int:delete_id>/', catalogDelete, name='catalogdelete'),
    path('catalogUpdate/<int:id_catalog>/', catalogUpdate, name='catalogupdate'),
    path('repository/', RepositoryView.as_view(), name='repositorycrud'),
    path('repositoryDelete/<int:delete_id>/', repositoryDelete, name='repositorydelete'),
    path('repositoryUpdate/<int:id_repository>/', repositoryUpdate, name='repositoryupdate'),
    path('journalDelete/<int:delete_id>/', journalDelete, name='journaldelete'),
    path('journalUpdate/<int:id_journal>/', journalUpdate, name='journalupdate'),
    path('journal/', JournalView.as_view(), name='journalcrud'),
] | muhzulfik/project-library | crudPage/urls.py | urls.py | py | 845 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
27805897926 | import fire
import pandas as pd
from src import INPUT_DATA_PATH, TRAIN_DATA_PATH, VAL_DATA_PATH
def create_spilts(
    train_val_split: float = 0.1,
    path_input_data: str = INPUT_DATA_PATH,
    path_train_data: str = TRAIN_DATA_PATH,
    path_val_data: str = VAL_DATA_PATH,
):
    """Split the input CSV into train/val sets, stratified by language.

    NOTE(review): the name keeps the original 'spilts' typo because the CLI
    entry point below references it.

    Parameters
    ----------
    train_val_split : fraction of each language's rows held out for validation
    path_input_data / path_train_data / path_val_data : CSV paths
    """
    df = pd.read_csv(path_input_data)
    fraction = 1 - train_val_split
    # Splitting data per language beforehand since preprocessing will be
    # different for the datasets; fixed the duplicated copy-paste branches by
    # looping (same order, same random_state, identical output).
    train_parts, val_parts = [], []
    for language in ("tamil", "hindi"):
        subset = df[df["language"] == language]
        train_part = subset.sample(frac=fraction, random_state=200)
        train_parts.append(train_part)
        val_parts.append(subset.drop(train_part.index))

    train_split = pd.concat(train_parts).reset_index(drop=True)
    val_split = pd.concat(val_parts).reset_index(drop=True)

    train_split.to_csv(path_train_data, index=False)
    val_split.to_csv(path_val_data, index=False)
if __name__ == "__main__":
fire.Fire(create_spilts)
| karthikrangasai/sturdy-eureka | src/create_splits.py | create_splits.py | py | 1,194 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "src.INPUT_DATA_PATH",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "src.TRAIN_DATA_PATH",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "src.VAL_DATA_PATH",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pandas.read_... |
42310547395 | #!/usr/bin/env python
import numpy as np
import os
import pytest
import requests # noqa: F401
from src.data_collection.Poloniex import poloniex_data
def test_init():
    # PoloniexDataManager requires a currency pair; no-arg construction must
    # raise TypeError.
    with pytest.raises(TypeError):
        poloniex_data.PoloniexDataManager()
@pytest.fixture(scope='function')
def mock_retry_request(monkeypatch, request):
    # Replace retry_request so each API call pops the next canned response
    # from request.param instead of hitting the network.
    monkeypatch.setattr(
        'src.data_collection.Poloniex.poloniex_data.retry_request',
        lambda func: lambda *args, **kwargs: request.param.pop(0)
    )

@pytest.fixture(scope='function')
def manager(monkeypatch, request):
    # Build a PoloniexDataManager with END_DATE patched to request.param[0]
    # and the candle period preset to request.param[1].
    monkeypatch.setattr(
        'src.data_collection.Poloniex.poloniex_data.END_DATE',
        request.param[0])
    polo = poloniex_data.PoloniexDataManager('BTC_POT')
    polo.start = 0
    polo.period = request.param[1]
    return polo
# Each case: manager=(END_DATE, period), mock responses per request, and the
# expected validated period.
@pytest.mark.parametrize('manager,mock_retry_request,expected', [
    ((10, 10), [np.arange(10000)], 10),
    ((10, 20), [np.arange(10000), np.arange(10000)], 10),
    ((30000000, 28000000), [np.arange(10000), np.arange(10000)], 25920000),
    ((10, 5), [[], np.arange(10000)], 5),
    ((10, 3), [np.arange(4000), np.arange(8000)], 9),
    ((20, 10), [np.arange(50001), np.arange(25000)], 5)
], indirect=['manager', 'mock_retry_request'])
def test_validate_period(manager, mock_retry_request, expected):
    assert manager.validate_period() == expected
# def exception_raiser(arr: list):
# return arr.pop(0)
# @pytest.mark.parametrize('arr', [
# ([requests.exceptions.ReadTimeout] * 2 + [1]),
# ([requests.exceptions.ReadTimeout] * 3 + [1]),
# ([requests.exceptions.ReadTimeout] * 4 + [1]),
# ])
# def test_retry_request_valid(arr):
# assert poloniex_data.retry_request(exception_raiser)(arr) == arr[-1]
# @pytest.mark.parametrize('arr', [
# ([requests.exceptions.ReadTimeout] * 5),
# ])
# def test_retry_request_errors(arr):
# with pytest.raises(requests.exceptions.ReadTimeout):
# poloniex_data.retry_request(exception_raiser)(arr)
if __name__ == '__main__':
    # Run only this module's tests when executed directly.
    pytest.main([os.path.join(
        '..', 'src', 'data_collection', 'Poloniex', 'test_poloniex_data.py')])
| tobiasraabe/crypto | src/data_collection/Poloniex/test_poloniex_data.py | test_poloniex_data.py | py | 2,125 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pytest.raises",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "src.data_collection.Poloniex.poloniex_data.PoloniexDataManager",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "src.data_collection.Poloniex.poloniex_data",
"line_number": 13,
... |
3018927991 | # -*- coding: utf-8 -*-
"""
Performance test of cnn architectures on various degradation models.
Created on Thu May 24 11:00:00 2018
Author: Prasun Roy | CVPRU-ISICAL (http://www.isical.ac.in/~cvpr)
GitHub: https://github.com/prasunroy/cnn-on-degraded-images
"""
# imports
from __future__ import division
from __future__ import print_function
import cv2
import glob
import json
import numpy
import os
import pandas
import sys
from keras.applications import mobilenet
from keras.models import model_from_json
from matplotlib import pyplot
from libs.CapsuleNetwork import CapsuleLayer
from libs.CapsuleNetwork import Mask
from libs.CapsuleNetwork import Length
from libs.DegradationModels import imdegrade
from libs.PipelineUtils import save_samples
from libs.PipelineUtils import shutdown
from mlutils.callbacks import Telegram
# configurations
# -----------------------------------------------------------------------------
RANDOM_SEED = None
RANDOM_STR = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
PROCESS_ID = ''.join([RANDOM_STR[numpy.random.randint(0, len(RANDOM_STR))] \
for _ in range(16)])
DATASET_ID = 'synthetic_digits'
LABEL_MAPS = 'data/{}/labelmap.json'.format(DATASET_ID)
IMAGE_DSRC = 'data/{}/imgs_valid/'.format(DATASET_ID)
IMAGE_READ = 1
SAVE_NOISY = False
SAMP_NOISY = 10
NOISE_LIST = ['Gaussian_White', 'Gaussian_Color', 'Salt_and_Pepper',
'Motion_Blur', 'Gaussian_Blur', 'JPEG_Quality']
MODEL_LIST = ['capsnet', 'inceptionv3', 'mobilenet', 'resnet50',
'vgg16', 'vgg19']
MODELS_DICT = {name.lower(): 'output/{}/{}/models/{}.json'\
.format(DATASET_ID, name, name) for name in MODEL_LIST}
WEIGHT_DICT = {name.lower(): 'output/{}/{}/checkpoints/{}_best.h5'\
.format(DATASET_ID, name, name) for name in MODEL_LIST}
TOP_N_PRED = 3
OUTPUT_DIR_NOISY = 'output/{}/__test__images__'.format(DATASET_ID)
OUTPUT_DIR_TOP_1 = 'output/{}/__test__top{}__/'.format(DATASET_ID, 1)
OUTPUT_DIR_TOP_N = 'output/{}/__test__top{}__/'.format(DATASET_ID, TOP_N_PRED)
AUTH_TOKEN = None
TELCHAT_ID = None
TEL_CLIENT = Telegram(auth_token=AUTH_TOKEN, chat_id=TELCHAT_ID)
F_SHUTDOWN = False
# -----------------------------------------------------------------------------
# setup parameters
sigmavals = [x for x in range(0, 256, 5)]
densities = [x/100 for x in range(0, 101, 5)]
mb_ksizes = [x for x in range(3, 32, 2)]
gb_ksizes = [x for x in range(1, 52, 2)]
qualities = [x for x in range(30, -1, -2)]
# setup acknowledgement message templates
ack_msg_beg = """`
Received a new test request.
TASK ID: {}
DATASET: {}
SAMPLES: {}
Using CNN architectures:
{}
Using degradation models:
{}
`"""\
.format(PROCESS_ID,
DATASET_ID,
{},
''.join(['\t* {}\n'.format(model) for model in MODEL_LIST]),
''.join(['\t* {}\n'.format(noise) for noise in NOISE_LIST]))
ack_msg_end = """`
An ongoing test is finished.
TASK ID: {}
`"""\
.format(PROCESS_ID)
ack_msg_int = """`
An ongoing test is interrupted.
TASK ID: {}
REASONS: {}
`"""\
.format(PROCESS_ID, {})
# validate paths
def validate_paths():
    """Verify that all configured inputs exist and that the output
    directories exist and are empty; return True when everything is OK."""
    ok = True
    if not os.path.isfile(LABEL_MAPS):
        print('[INFO] Label mapping not found at {}'.format(LABEL_MAPS))
        ok = False
    if not os.path.isdir(IMAGE_DSRC):
        print('[INFO] Image data source not found at {}'.format(IMAGE_DSRC))
        ok = False
    # Every architecture JSON and its best checkpoint must be present.
    for name, path in MODELS_DICT.items():
        if not os.path.isfile(path):
            print('[INFO] {} architecture not found at {}'.format(name, path))
            ok = False
    for name, path in WEIGHT_DICT.items():
        if not os.path.isfile(path):
            print('[INFO] {} checkpoint not found at {}'.format(name, path))
            ok = False
    # Output directories are created when missing, rejected when non-empty.
    for directory in (OUTPUT_DIR_NOISY, OUTPUT_DIR_TOP_1, OUTPUT_DIR_TOP_N):
        if not os.path.isdir(directory):
            os.makedirs(directory)
        elif glob.glob(os.path.join(directory, '*.*')):
            print('[INFO] Output directory {} must be empty'.format(directory))
            ok = False
    return ok
# load data
def load_data():
    """Read all validation images; return (images, integer labels) using the
    class -> id mapping from labelmap.json."""
    with open(LABEL_MAPS, 'r') as file:
        labelmap = json.load(file)
    # Each immediate subdirectory of IMAGE_DSRC is one class label.
    labels = [os.path.split(d[0])[-1] for d in os.walk(IMAGE_DSRC)][1:]
    x, y = [], []
    for label in labels:
        for file in glob.glob(os.path.join(IMAGE_DSRC, label, '*.*')):
            image = cv2.imread(file, IMAGE_READ)
            if image is None:
                # unreadable / non-image file: skip silently
                continue
            x.append(image)
            y.append(labelmap[label])
    return (x, y)
# load models
def load_models():
    """Load every architecture in MODELS_DICT and restore its best weights."""
    models = {}
    # define required custom objects: CapsNet's custom layers and MobileNet's
    # relu6 are not registered with keras by default.
    # NOTE(review): mobilenet.relu6 was removed in later keras versions —
    # confirm the pinned version before upgrading.
    args_caps = {'CapsuleLayer': CapsuleLayer, 'Mask': Mask, 'Length': Length}
    args_mobi = {'relu6': mobilenet.relu6}
    # load model architectures from their serialized JSON
    for name, path in MODELS_DICT.items():
        with open(path, 'r') as file:
            model_json = file.read()
        if name == 'capsnet':
            models[name] = model_from_json(model_json, args_caps)
        elif name == 'mobilenet':
            models[name] = model_from_json(model_json, args_mobi)
        else:
            models[name] = model_from_json(model_json)
    # load model weights from the best checkpoints
    for name, path in WEIGHT_DICT.items():
        models[name].load_weights(path)
    return models
# initialize histories
def init_histories(init_dict=None):
    """Return [top-1 history, top-N history] dicts seeded with *init_dict*
    plus one empty accuracy list per model in MODEL_LIST.

    Fixed: the default argument was a mutable dict literal; use the
    None-sentinel idiom instead (behavior is unchanged because the original
    copied it, but the shared-literal smell is gone).
    """
    init_dict = {} if init_dict is None else init_dict
    histories_top_1 = init_dict.copy()
    histories_top_n = init_dict.copy()
    for name in MODEL_LIST:
        histories_top_1['acc_' + name.lower()] = []
        histories_top_n['acc_' + name.lower()] = []
    return [histories_top_1, histories_top_n]
# save and plot histories
def save_and_plot_histories(file_id, histories, title='', xlabel='', ylabel='',
                            invert_xaxis=False, invert_yaxis=False):
    """Write the top-1 and top-N accuracy histories to CSV, plot them, and
    push each saved plot to the Telegram chat."""
    for hist_dict, output_dir, ylabel_prefix \
        in zip(histories,
               [OUTPUT_DIR_TOP_1, OUTPUT_DIR_TOP_N],
               ['', 'top-{} '.format(TOP_N_PRED)]):
        # save histories
        df = pandas.DataFrame(hist_dict)
        df.to_csv(os.path.join(output_dir, str(file_id) + '.csv'), index=False)
        # plot histories
        pyplot.figure()
        pyplot.title(title)
        pyplot.xlabel(xlabel)
        pyplot.ylabel(ylabel_prefix + ylabel)
        if invert_xaxis:
            pyplot.gca().invert_xaxis()
        if invert_yaxis:
            pyplot.gca().invert_yaxis()
        # The x-axis column is the single key that is not an 'acc_*' series.
        x = [key for key in hist_dict.keys() if key.split('_')[0] != 'acc'][0]
        for y in hist_dict.keys():
            if y == x:
                continue
            pyplot.plot(hist_dict[x], hist_dict[y], label=y.split('_')[-1])
        pyplot.legend()
        pyplot.savefig(os.path.join(output_dir, str(file_id) + '.png'))
        pyplot.show(block=False)
        # acknowledgement: send the saved plot image to Telegram
        plot_cap = '`{} [TASK ID: {}]`'.format(title, PROCESS_ID)
        plot_img = open(os.path.join(output_dir, str(file_id) + '.png'), 'rb')
        TEL_CLIENT._send_photo(data={'chat_id': TELCHAT_ID,
                                     'caption': plot_cap,
                                     'parse_mode': 'Markdown'},
                               files={'photo': plot_img})
    return
# test models
def test_models(x, y, models):
    """Evaluate every model on images *x* / labels *y*; return [top-1, top-N]
    accuracy dicts keyed by 'acc_<model>'."""
    print('')
    samples = len(x)
    results_top_1 = {}
    results_top_n = {}
    for name in models.keys():
        print('[INFO] Preparing images for {}'.format(name))
        # Resize each image to this model's expected input resolution.
        images = []
        counts = 0
        for image in x:
            images.append(cv2.resize(image, models[name].input_shape[1:3]))
            counts += 1
            print('\r[INFO] Progress... {:3.0f}%'\
                  .format(counts*100/samples), end='')
        print('\n[INFO] Testing images on {}... '.format(name), end='')
        x_test = numpy.asarray(images, dtype='float32') / 255.0
        y_test = numpy.asarray(y, dtype='int')
        # argsort keeps the TOP_N_PRED most probable class indices, best last.
        if name == 'capsnet':
            # CapsNet predicts multiple outputs; index 0 holds the class scores.
            p_test = models[name].predict(x_test)[0].argsort(axis=1)[:, -TOP_N_PRED:]
        else:
            p_test = models[name].predict(x_test).argsort(axis=1)[:, -TOP_N_PRED:]
        accuracy_top_1 = sum(p_test[:, -1]==y_test) * 100.0 / samples
        accuracy_top_n = sum([int(y in p) for y, p in zip(y_test, p_test)]) * 100.0 / samples
        results_top_1['acc_' + name] = accuracy_top_1
        results_top_n['acc_' + name] = accuracy_top_n
        print('done\t\t[accuracy: {:6.2f}%] [top-{} accuracy: {:6.2f}%]'\
              .format(accuracy_top_1, TOP_N_PRED, accuracy_top_n))
    return [results_top_1, results_top_n]
# tests for gaussian white noise
def test_gaussian_white(x, y, models):
    """Sweep additive Gaussian white noise over the module-level `sigmavals`,
    testing every model at each level; saves CSV/plot of accuracy histories."""
    # initialize histories
    histories = init_histories({'sigma': sigmavals})
    # run tests
    for sigma in sigmavals:
        # apply gaussian white noise (mu fixed at 0, seeded for reproducibility)
        print('[INFO] Applying Gaussian white noise with mu=0 and sigma={}'\
              .format(sigma))
        noisy = []
        count = 0
        for image in x:
            noisy.append(imdegrade(image, 'gaussian_white', mu=0, sigma=sigma,
                                   seed=RANDOM_SEED))
            count += 1
            print('\r[INFO] Progress... {:3.0f}%'\
                  .format(count*100/len(x)), end='')
        # save noisy image samples if required
        if SAVE_NOISY:
            save_samples(noisy, SAMP_NOISY, randomize=True, seed=RANDOM_SEED,
                         filename_prefix='gaussian_white_sigma_{}'.format(sigma),
                         target_directory=OUTPUT_DIR_NOISY)
        # test models
        results = test_models(noisy, y, models)
        # update histories (histories[0]: top-1, histories[1]: top-N)
        for hist_dict, res_dict in zip(histories, results):
            for key in res_dict.keys():
                hist_dict[key].append(res_dict[key])
    # save and plot histories once the whole sweep is complete
    save_and_plot_histories(file_id='gaussian_white', histories=histories,
                            title='Change in accuracy with Gaussian white noise',
                            xlabel='standard deviation (\u03c3)',
                            ylabel='accuracy (\u0025)')
    return
# tests for gaussian color noise
def test_gaussian_color(x, y, models):
    """Sweep Gaussian color noise over `sigmavals` (same protocol as
    test_gaussian_white but with per-channel noise)."""
    # initialize histories
    histories = init_histories({'sigma': sigmavals})
    # run tests
    for sigma in sigmavals:
        # apply gaussian color noise
        print('[INFO] Applying Gaussian color noise with mu=0 and sigma={}'\
              .format(sigma))
        noisy = []
        count = 0
        for image in x:
            noisy.append(imdegrade(image, 'gaussian_color', mu=0, sigma=sigma,
                                   seed=RANDOM_SEED))
            count += 1
            print('\r[INFO] Progress... {:3.0f}%'\
                  .format(count*100/len(x)), end='')
        # save noisy image samples if required
        if SAVE_NOISY:
            save_samples(noisy, SAMP_NOISY, randomize=True, seed=RANDOM_SEED,
                         filename_prefix='gaussian_color_sigma_{}'.format(sigma),
                         target_directory=OUTPUT_DIR_NOISY)
        # test models
        results = test_models(noisy, y, models)
        # update histories
        for hist_dict, res_dict in zip(histories, results):
            for key in res_dict.keys():
                hist_dict[key].append(res_dict[key])
    # save and plot histories for the full sweep
    save_and_plot_histories(file_id='gaussian_color', histories=histories,
                            title='Change in accuracy with Gaussian color noise',
                            xlabel='standard deviation (\u03c3)',
                            ylabel='accuracy (\u0025)')
    return
# tests for salt and pepper noise
def test_salt_and_pepper(x, y, models):
    """Sweep salt-and-pepper noise over the module-level `densities`."""
    # initialize histories
    histories = init_histories({'density': densities})
    # run tests
    for density in densities:
        # apply salt and pepper noise
        print('[INFO] Applying salt and pepper noise with density={}'\
              .format(density))
        noisy = []
        count = 0
        for image in x:
            noisy.append(imdegrade(image, 'salt_and_pepper', density=density,
                                   seed=RANDOM_SEED))
            count += 1
            print('\r[INFO] Progress... {:3.0f}%'\
                  .format(count*100/len(x)), end='')
        # save noisy image samples if required
        if SAVE_NOISY:
            save_samples(noisy, SAMP_NOISY, randomize=True, seed=RANDOM_SEED,
                         filename_prefix='salt_and_pepper_density_{}'.format(density),
                         target_directory=OUTPUT_DIR_NOISY)
        # test models
        results = test_models(noisy, y, models)
        # update histories
        for hist_dict, res_dict in zip(histories, results):
            for key in res_dict.keys():
                hist_dict[key].append(res_dict[key])
    # save and plot histories for the full sweep
    save_and_plot_histories(file_id='salt_and_pepper', histories=histories,
                            title='Change in accuracy with salt and pepper noise',
                            xlabel='noise density',
                            ylabel='accuracy (\u0025)')
    return
# tests for motion blur
def test_motion_blur(x, y, models):
    """Sweep motion blur over the module-level `mb_ksizes` kernel sizes."""
    # initialize histories
    histories = init_histories({'kernel_size': mb_ksizes})
    # run tests
    for ksize in mb_ksizes:
        # apply motion blur
        print('[INFO] Applying motion blur with kernel size=({}, {})'\
              .format(ksize, ksize))
        noisy = []
        count = 0
        # kernel with ones along the middle row, normalized to sum 1:
        # a horizontal line kernel, i.e. horizontal motion blur
        mb_kernel = numpy.zeros((ksize, ksize))
        mb_kernel[ksize//2, :] = 1
        mb_kernel /= numpy.sum(mb_kernel)
        for image in x:
            noisy.append(imdegrade(image, 'motion_blur', mb_kernel=mb_kernel,
                                   seed=RANDOM_SEED))
            count += 1
            print('\r[INFO] Progress... {:3.0f}%'\
                  .format(count*100/len(x)), end='')
        # save noisy image samples if required
        if SAVE_NOISY:
            save_samples(noisy, SAMP_NOISY, randomize=True, seed=RANDOM_SEED,
                         filename_prefix='motion_blur_ksize_({}x{})'.format(ksize, ksize),
                         target_directory=OUTPUT_DIR_NOISY)
        # test models
        results = test_models(noisy, y, models)
        # update histories
        for hist_dict, res_dict in zip(histories, results):
            for key in res_dict.keys():
                hist_dict[key].append(res_dict[key])
    # save and plot histories for the full sweep
    save_and_plot_histories(file_id='motion_blur', histories=histories,
                            title='Change in accuracy with motion blur',
                            xlabel='kernel size',
                            ylabel='accuracy (\u0025)')
    return
# tests for gaussian blur
def test_gaussian_blur(x, y, models):
    """Sweep Gaussian blur over the module-level `gb_ksizes` kernel sizes."""
    # initialize histories
    histories = init_histories({'kernel_size': gb_ksizes})
    # run tests
    for ksize in gb_ksizes:
        # apply gaussian blur with a square (ksize x ksize) kernel
        print('[INFO] Applying Gaussian blur with kernel size=({}, {})'\
              .format(ksize, ksize))
        noisy = []
        count = 0
        for image in x:
            noisy.append(imdegrade(image, 'gaussian_blur',
                                   gb_ksize=(ksize, ksize), seed=RANDOM_SEED))
            count += 1
            print('\r[INFO] Progress... {:3.0f}%'\
                  .format(count*100/len(x)), end='')
        # save noisy image samples if required
        if SAVE_NOISY:
            save_samples(noisy, SAMP_NOISY, randomize=True, seed=RANDOM_SEED,
                         filename_prefix='gaussian_blur_ksize_({}x{})'.format(ksize, ksize),
                         target_directory=OUTPUT_DIR_NOISY)
        # test models
        results = test_models(noisy, y, models)
        # update histories
        for hist_dict, res_dict in zip(histories, results):
            for key in res_dict.keys():
                hist_dict[key].append(res_dict[key])
    # save and plot histories for the full sweep
    save_and_plot_histories(file_id='gaussian_blur', histories=histories,
                            title='Change in accuracy with Gaussian blur',
                            xlabel='kernel size',
                            ylabel='accuracy (\u0025)')
    return
# tests for jpeg quality
def test_jpeg_quality(x, y, models):
    """Sweep JPEG compression over the module-level `qualities`.

    The x-axis is inverted in the plot because lower quality means
    stronger degradation.
    """
    # initialize histories
    histories = init_histories({'image_quality': qualities})
    # run tests
    for quality in qualities:
        # apply jpeg compression
        print('[INFO] Applying JPEG compression with quality={}'\
              .format(quality))
        noisy = []
        count = 0
        for image in x:
            noisy.append(imdegrade(image, 'jpeg_compression', quality=quality,
                                   seed=RANDOM_SEED))
            count += 1
            print('\r[INFO] Progress... {:3.0f}%'\
                  .format(count*100/len(x)), end='')
        # save noisy image samples if required
        if SAVE_NOISY:
            save_samples(noisy, SAMP_NOISY, randomize=True, seed=RANDOM_SEED,
                         filename_prefix='jpeg_quality_{}'.format(quality),
                         target_directory=OUTPUT_DIR_NOISY)
        # test models
        results = test_models(noisy, y, models)
        # update histories
        for hist_dict, res_dict in zip(histories, results):
            for key in res_dict.keys():
                hist_dict[key].append(res_dict[key])
    # save and plot histories for the full sweep
    save_and_plot_histories(file_id='jpeg_quality', histories=histories,
                            title='Change in accuracy with JPEG quality',
                            xlabel='image quality',
                            ylabel='accuracy (\u0025)',
                            invert_xaxis=True)
    return
# test
def test():
    """Main driver: load data and models, then run every test in NOISE_LIST.

    Sends begin/end acknowledgements through the module-level Telegram
    client. Unrecognized entries in NOISE_LIST are silently skipped.
    """
    # validate paths
    if not validate_paths():
        return
    # load data
    print('[INFO] Loading data... ', end='')
    (x, y) = load_data()
    print('done')
    # load models
    print('[INFO] Loading models... ', end='')
    models = load_models()
    print('done')
    # acknowledgement
    TEL_CLIENT._send_message(data={'chat_id': TELCHAT_ID,
                                   'text': ack_msg_beg.format(len(x)),
                                   'parse_mode': 'Markdown'})
    print('-'*34 + ' BEGIN TEST ' + '-'*34)
    # run tests, dispatching on the (case-insensitive) noise name
    for noise in NOISE_LIST:
        if noise.lower() == 'gaussian_white':
            test_gaussian_white(x, y, models)
        elif noise.lower() == 'gaussian_color':
            test_gaussian_color(x, y, models)
        elif noise.lower() == 'salt_and_pepper':
            test_salt_and_pepper(x, y, models)
        elif noise.lower() == 'motion_blur':
            test_motion_blur(x, y, models)
        elif noise.lower() == 'gaussian_blur':
            test_gaussian_blur(x, y, models)
        elif noise.lower() == 'jpeg_quality':
            test_jpeg_quality(x, y, models)
    print('-'*35 + ' END TEST ' + '-'*35)
    # acknowledgement
    TEL_CLIENT._send_message(data={'chat_id': TELCHAT_ID,
                                   'text': ack_msg_end,
                                   'parse_mode': 'Markdown'})
    return
# main
if __name__ == '__main__':
    try:
        test()
        # optionally power down the machine after a long run
        if F_SHUTDOWN:
            shutdown()
    except:
        # NOTE(review): bare except deliberately catches everything
        # (including KeyboardInterrupt) so the interruption can be reported
        # over Telegram; nothing is re-raised, so failures never propagate.
        error = sys.exc_info()[0].__name__ if sys.exc_info()[0] is not None else 'Unknown'
        print('\n[INFO] Process interrupted [Reasons: {}]'.format(error))
        # acknowledgement
        TEL_CLIENT._send_message(data={'chat_id': TELCHAT_ID,
                                       'text': ack_msg_int.format(error),
                                       'parse_mode': 'Markdown'})
| prasunroy/cnn-on-degraded-images | test.py | test.py | py | 20,147 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "numpy.random.randint",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "mlutils.callbacks.Telegram",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.... |
74064928034 | import torch.nn as nn
class stack_conv_layer(nn.Module):
    """Two stacked 3x3 conv blocks, each followed by batch-norm and ReLU.

    `filter` is a 3-element sequence (in_channels, mid_channels,
    out_channels). Spatial dimensions are preserved (padding=1).
    """
    def __init__(self, filter):
        super(stack_conv_layer, self).__init__()
        # submodule names and registration order are kept stable so that
        # checkpoints/state_dicts remain compatible
        self.activation = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(filter[0], filter[1], kernel_size=3, padding=1, bias=True)
        self.conv2 = nn.Conv2d(filter[1], filter[2], kernel_size=3, padding=1, bias=True)
        self.batchnorm1 = nn.BatchNorm2d(filter[1])
        self.batchnorm2 = nn.BatchNorm2d(filter[2])

    def forward(self, feature_map):
        # conv -> batch-norm -> ReLU, applied twice
        hidden = self.activation(self.batchnorm1(self.conv1(feature_map)))
        return self.activation(self.batchnorm2(self.conv2(hidden)))
"""
This is the official way to stack layer. Let's see if we can use this.
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
"""
class upsample_layer(nn.Module):
    """2x bilinear upsampling followed by a 3x3 conv, batch-norm and ReLU.

    `filter` is (in_channels, out_channels); spatial dimensions double.
    """
    def __init__(self, filter):
        super(upsample_layer, self).__init__()
        # submodule names/order kept stable for state_dict compatibility
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.activation = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(filter[0], filter[1], kernel_size=3, padding=1, bias=True)
        self.batchnorm1 = nn.BatchNorm2d(filter[1])

    def forward(self, feature_map):
        # upsample -> conv -> batch-norm -> ReLU as a single pipeline
        return self.activation(self.batchnorm1(self.conv1(self.upsample(feature_map))))
| Cli98/tep-repo | Networks/customer_module.py | customer_module.py | py | 2,042 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number"... |
36698448915 | from visual import *
from visual.graph import * # graphing capability
import math
from matplotlib import pyplot as plt
import random
from random import randint
import numpy as np
from scipy.special import gamma # usando gamma
import pylab
from scipy.stats import beta
import panda as pd
from matplotlib.ticker import MaxNLocator
from collections import namedtuple
fig, ax = plt.subplots(1,1)
# Função de distribuição Beta:
def beta_pdff(x, a, b):
    """Beta(a, b) probability density evaluated at x (0 < x < 1)."""
    # normalizing constant Gamma(a+b) / (Gamma(a) * Gamma(b))
    norm_const = float(gamma(a + b) / (gamma(a) * gamma(b)))
    return norm_const * x ** (a - 1) * (1 - x) ** (b - 1)
def gauss_pdf(x, mu, sigma):
    """Normal(mu, sigma) probability density evaluated at x."""
    z = (x - mu) / sigma
    # 1/(sqrt(2*pi)*sigma) * exp(-z^2 / 2)
    return math.exp(-z * z / 2.) / (math.sqrt(2. * math.pi) * sigma)
a, b = 2.0,1.0
#x = np.linspace(beta.ppf(0.01, a, b),beta.ppf(0.99, a, b), 100)
y = np.linspace(0, 1, 1002)[1:-1]
#ax.plot(x, beta.pdf(x, a, b),'r-', lw=5, alpha=0.6, label='beta pdf')
'''
dis = beta(a,b)
dis2 = beta(a+1.,b)
dis3 = beta(a+2.,b)
dis4 = beta(a+2.,b+1.)
dis5 = beta(a+8.,b+2.)
dis6 = beta(a+10.,b+6.)
dis7 = beta(a+20.,b)
ax.plot(y,dis.pdf(y),'r-', lw=1, alpha=0.6, label='a=2.0 b=1.0')
ax.plot(y,dis2.pdf(y),'b-', lw=1, alpha=0.6, label='a=3.0 b=1.0')
ax.plot(y,dis3.pdf(y),'g-', lw=1, alpha=0.6, label='a=3.0 b=2.0')
ax.plot(y,dis4.pdf(y),'b-', lw=1, alpha=0.6, label='a=4.0 b=2.0')
ax.plot(y,dis5.pdf(y),'w-', lw=1, alpha=0.6, label='a=10.0 b=3.0')
ax.plot(y,dis6.pdf(y),'k-', lw=1, alpha=0.6, label='a=12.0 b=7.0')
ax.plot(y,dis7.pdf(y),'k--', lw=1, alpha=0.6, label='a=22.0 b=0.0')
legend = ax.legend(loc='upper center', shadow=True, fontsize='x-large')
'''
#### Função do reward deterministica:
def recompensa(Place):
    """Deterministic reward function: arm 0 ("A") pays 1, arms 1-3
    ("B", "C", "D") pay 0. Any other value yields None, as before."""
    payout_table = {0: 1, 1: 0, 2: 0, 3: 0}
    return payout_table.get(Place)
def recompensa_gauss(Place):
    """Stochastic reward: one Gaussian draw whose (mu, sigma) depends on
    the arm. Uses the global `random` state; unknown arms return None."""
    arm_params = {0: (3, 0.4), 1: (1, 1), 2: (4, 0.6), 3: (2, 0.1)}
    if Place not in arm_params:
        return None
    mu, sigma = arm_params[Place]
    return random.gauss(mu, sigma)
###############################################################
# Thompson sampling with Beta posteriors over K Bernoulli arms
K = 4            # number of arms
T = 100          # number of rounds
S = [0] * K      # per-arm success counts
F = [0] * K      # per-arm failure counts
ad_vector = []   # arm chosen at each round
total_reward = 0 # running sum of rewards
recompensas_vector = []  # cumulative reward after each round
# posterior-mean estimate of each arm's success probability over time
prob_distr_0 = []
prob_distr_1 = []
prob_distr_2 = []
prob_distr_3 = []
for t in range(T):
    teta_max = 0
    ad = 0
    for i in range (K):
        # Thompson sampling: draw from each arm's Beta(S+1, F+1) posterior
        teta_random = random.betavariate(S[i]+1,F[i]+1)
        # 1/(1+(F+1)/(S+1)) equals (S+1)/(S+F+2), the posterior mean
        if(i==0):
            prob_distr_0.append(1./(1.+(F[0]+1.)/(S[0]+1.)))
            #print(1/(1+(F[0]+1.)/(S[0]+1.)))
        elif(i==1):
            prob_distr_1.append(1./(1.+(F[1]+1.)/(S[1]+1.)))
        elif(i==2):
            prob_distr_2.append(1./(1.+(F[2]+1.)/(S[2]+1.)))
        elif(i==3):
            prob_distr_3.append( 1./( 1. + (F[3]+1.)/(S[3]+1.) ) )
        # keep the arm with the largest sampled value
        if(teta_random > teta_max):
            teta_max = teta_random
            ad = i
    ad_vector.append(ad)
    #print("chosen arm index =",ad)
    #print("max theta =",teta_max)
    reward = recompensa(ad)  # deterministic reward for the chosen arm
    # swap recompensa for recompensa_gauss to get a stochastic process
    if(reward == 1):   # with Gaussian rewards, treat e.g. values > 3 as
        S[ad] = S[ad] + 1  # success and values <= 3 as failure
    elif(reward == 0):
        F[ad] = F[ad] + 1
    recompensas_vector.append(total_reward + reward)
    total_reward = total_reward + reward
    #print(total_reward, t)
################################################################
# EM learning problem based on Thompson Sampling:
K = 4 # arms = 4
T1 = 3 # Period
S = [0]*K
F = [0]*K
Q = [0]*K # vetor de cada arm
P = [0.5]*K # vetor de prior
Mu = [1,1,1,1] # vetor de medias
Sigma = [0.1,0.1,0.1,0.1] # vetor de variancia
X = [] # Vetores de dados -
total_reward_EM = 0 # Somatorio das recompensas
recompensas_vector_EM = []
ad_vector_EM = []
for t in range(T1):
teta_max = 0
ad = 0
print("-------NEW Interaction---------")
for i in range(K):
teta_random = random.gauss(Mu[i],Sigma[i])
print("arm=",i,"valor=",teta_random)
if(teta_random > teta_max):
teta_max = teta_random
ad = i
ad_vector_EM.append(ad)
reward = recompensa(ad)
X.append(reward)
if(reward == 1):
S[i] = S[i] + 1
else:
F[i] = F[i] + 1
P[i] = random.betavariate(S[i]+1,F[i]+1)
print("P[",i,"]=",P[i])
print("X =",X)
total_reward_EM = total_reward_EM + reward
recompensas_vector_EM.append(total_reward + reward)
print("total rewards=",total_reward_EM)
print("Mu[0],Sigma[0]=",Mu[0],Sigma[0])
print("Mu[1],Sigma[1]=",Mu[1],Sigma[1])
print("Mu[2],Sigma[2]=",Mu[2],Sigma[2])
print("Mu[3],Sigma[3]=",Mu[3],Sigma[3])
##### EM STEP:
# E-STEP:
P_D1 = [0]*len(X)
P_D2 = [0]*len(X)
P_D3 = [0]*len(X)
P_D4 = [0]*len(X)
a=[0]*len(X)
b=[0]*len(X)
c=[0]*len(X)
d=[0]*len(X)
# Prior vector
for j in range(len(X)):
a[j] = float(P[0]*gauss_pdf(X[j],Mu[0],Sigma[0])) + float(P[0]*gauss_pdf(X[j],Mu[1],Sigma[1])) + float(P[0]*gauss_pdf(X[j],Mu[2],Sigma[2])) + float(P[0]*gauss_pdf(X[j],Mu[3],Sigma[3]))
for j in range(len(X)):
b[j] = float(P[1]*gauss_pdf(X[j],Mu[0],Sigma[0])) + float(P[1]*gauss_pdf(X[j],Mu[1],Sigma[1])) + float(P[1]*gauss_pdf(X[j],Mu[2],Sigma[2])) + float(P[1]*gauss_pdf(X[j],Mu[3],Sigma[3]))
for j in range(len(X)):
c[j] =float(P[2]*gauss_pdf(X[j],Mu[0],Sigma[0]))+ float(P[2]*gauss_pdf(X[j],Mu[1],Sigma[1]))+ float(P[2]*gauss_pdf(X[j],Mu[2],Sigma[2]))+ float(P[2]*gauss_pdf(X[j],Mu[3],Sigma[3]))
for j in range(len(X)):
d[j] =float(P[3]*gauss_pdf(X[j],Mu[0],Sigma[0]))+ float(P[3]*gauss_pdf(X[j],Mu[1],Sigma[1]))+ float(P[3]*gauss_pdf(X[j],Mu[2],Sigma[2]))+ float(P[3]*gauss_pdf(X[j],Mu[3],Sigma[3]))
# Baysian vector
for j in range(len(X)):
P_D1[j]=float( P[0]*gauss_pdf(X[j],Mu[0],Sigma[0])/ float(a[j]) )
for j in range(len(X)):
P_D2[j]=float( P[1]*gauss_pdf(X[j],Mu[1],Sigma[1]) / float(b[j]))
for j in range(len(X)):
P_D3[j]=float( P[2]*gauss_pdf(X[j],Mu[2],Sigma[2]) / float(c[j]) )
for j in range(len(X)):
P_D4[j]=float( P[3]*gauss_pdf(X[j],Mu[3],Sigma[3]) / float(d[j]) )
print("PD1=",P_D1)
print("PD2=",P_D2)
print("PD3=",P_D3)
print("PD4=",P_D4)
#M-STEP:
mu = 0.
sig = 0.
for j in range(len(X)):
mu = mu + P_D1[j]
mu = float(mu)/len(X)
Mu[0] = mu
for j in range(len(X)):
sig = sig + ( (X[j]-mu)**2 ) * P_D1[j]
sig = float(sig)/len(X)
Sigma[0]=sig
# ARM 2 new feature vector
mu = 0.
sig = 0.
for j in range(len(X)):
mu = mu + P_D2[j]
mu = float(mu)/len(X)
Mu[1] = mu
for j in range(len(X)):
sig = sig + ( (X[j]-mu)**2 ) * P_D2[j]
sig = float(sig)/len(X)
Sigma[1]=sig
# ARM 3 new feature vector
mu = 0.
sig = 0.
for j in range(len(X)):
mu = mu + P_D3[j]
mu = float(mu)/len(X)
Mu[2] = mu
for j in range(len(X)):
sig = sig + ( (X[j]-mu)**2 ) * P_D3[j]
sig = float(sig)/len(X)
Sigma[2]=sig
# ARM 4 new feature vector
mu = 0.
sig = 0.
for j in range(len(X)):
mu = mu + P_D4[j]
mu = float(mu)/sum(P_D1)
Mu[3] = mu
for j in range(len(X)):
sig = sig + ( (X[j]-mu)**2 ) * P_D4[j]
sig = float(sig)/len(X)
Sigma[3]=sig
#################################################################
def eplsion_greedy(epislon,T):
    """Run one epsilon-greedy bandit episode of T pulls over 4 fixed arms.

    Explores (uniform random arm) with probability *epislon*, otherwise
    pulls the arm with the highest ACCUMULATED reward (Q is a running
    total, not an incremental mean — see the commented-out update rule).
    Returns the list of cumulative rewards after each step (length T).
    Depends on the module-level `recompensa` and the global random state.
    """
    K = 4
    Q = [0]*K   # accumulated reward per arm
    N = [0]*K   # pull count per arm
    Reward = []
    #T = 100
    total_reward_eg = 0
    for t in range(T):
        maior = 0   # best accumulated reward seen so far ("maior" = largest)
        ad = 0
        probability = random.random()
        if(probability > epislon):
            # exploit: arm with the largest accumulated reward (ties -> last)
            for i in range (K):
                if(Q[i] >= maior):
                    maior = Q[i]
                    ad = i
        else:
            # explore: uniformly random arm
            ad = randint(0,3)
        N[ad] = N[ad] + 1
        rewa = recompensa(ad)
        #Q[ad] = Q[ad] + (1/N[ad])*(rewa - Q[ad])
        Q[ad] = Q[ad] + rewa
        total_reward_eg = total_reward_eg + rewa
        Reward.append(total_reward_eg)
    return(Reward)
'''
def eplsion_greedy(eplison,T):
Reward_vector = []
arm_vector = [0]*K
Rew = []
total_reward_MAB = 0
total_vector = []
OldEpsilon = eplison
arm = randint(0,3)
Rew.append(recompensa(arm))
for t in range(T):
NewEpsilon = random.random()
if(NewEpsilon > OldEpsilon):
Rew.append(max(Rew))
arm = 1
else:
arm = randint(0,3)
Rew.append(recompensa(arm))
total_reward_MAB = sum(Rew)
total_vector.append(total_reward_MAB)
arm_vector[arm]=arm_vector[arm] + 1
return(sum(Rew))
'''
def eplsion_trials(trials,eplsion,T):
    """Average the outcome of *trials* independent epsilon-greedy runs.

    NOTE(review): eplsion_greedy returns a LIST (the cumulative-reward
    trace), so sum() over a list of lists raises TypeError — this function
    appears broken as written (perhaps it should use each run's final
    value, run[-1]). It is only referenced from commented-out code.
    """
    t=0
    media_recompensa=[]
    while(t<trials):
        media_recompensa.append(eplsion_greedy(eplsion,T))
        t+=1
    return(sum(media_recompensa)/trials)
def eplsion_var(trials,eplsion,T):
    """Estimate the standard deviation of epsilon-greedy outcomes.

    NOTE(review): like eplsion_trials this sums the LISTS returned by
    eplsion_greedy (TypeError), draws FRESH runs for the deviation term
    instead of reusing the samples used for the mean, and relies on
    `sqrt` coming from a star import. Appears broken/unused (only
    referenced in commented-out plotting code) — confirm before use.
    """
    t=0
    var = []
    media_recompensa=[]
    while(t<trials):
        media_recompensa.append(eplsion_greedy(eplsion,T))
        t+=1
    md = sum(media_recompensa)/float(trials)
    #print("mean =",md)
    for i in range(trials):
        var.append((eplsion_greedy(eplsion,T)-md)**2)
    variancia = sqrt(sum(var)/float(trials))
    return variancia
################################################################
# Epsilon Greedy - a simple bandit algorithm (inline run over T rounds)
K = 4
Q = [0]*K   # accumulated reward per arm (running total, not a mean)
N = [0]*K   # pull count per arm
Reward_EG = []  # cumulative reward trace
epislon = 0.5
total_reward_eg = 0
# NOTE(review): the lists/counters below are initialized but never
# updated in this loop — dead state, presumably left from an experiment.
prob_arm1_Eps = []
prob_arm2_Eps = []
prob_arm3_Eps = []
prob_arm4_Eps = []
ad1 = 0
ad0 = 0
ad2 = 0
ad3 = 0
for t in range(T):
    maior = 0   # best accumulated reward seen so far
    ad = 0
    probability = random.random()
    # exploit unless the draw falls below epsilon (explore w.p. epislon)
    if(probability > epislon):
        for i in range (K):
            if(Q[i] >= maior):
                maior = Q[i]
                ad = i
    else:
        ad = randint(0,3)
    #print("index=",ad)
    N[ad] = N[ad] + 1
    rewa = recompensa(ad)
    #Q[ad] = Q[ad] + (1/N[ad])*(rewa - Q[ad])
    Q[ad] = Q[ad] + rewa
    total_reward_eg = total_reward_eg + rewa
    Reward_EG.append(total_reward_eg)
################################################################
###### Plot graficos:
time = np.linspace(0, T, T)
fig, ax = plt.subplots()
# GRAFICOS DE RECOMPENSA
'''
cont = 0
Max_rew = []
for t in range(T):
cont = cont + 1
Max_rew.append(cont)
#ax.plot(time,Max_rew, 'g', label='Max reward')
a1=eplsion_greedy(0.0289)
a2=eplsion_greedy(0.1413)
a3=eplsion_greedy(0.4103)
a4=eplsion_greedy(0.9250)
a5=eplsion_greedy(0.8137)
ax.plot(time,a1, 'g-', label=r'$\epsilon$-greedy = 0.0289')
ax.plot(time,a2, 'b-', label=r'$\epsilon$-greedy =0.1413')
ax.plot(time,a3, 'k-', label=r'$\epsilon$-greedy =0.4103 ')
ax.plot(time,a4, 'c-', label=r'$\epsilon$-greedy =0.9250 ')
ax.plot(time,a5, 'm-', label=r'$\epsilon$-greedy =0.8137')
print("epsilon 0.0289 = ",a1)
print("epsilon 0.1413 = ",a2)
print("epsilon 0.4103 = ",a3)
print("epsilon 0.9250 = ",a4)
print("epsilon 0.8137 = ",a5)
ax.plot(time, recompensas_vector, 'r', label='Thompson sampling')
legend = ax.legend(loc='upper left', shadow=True, fontsize='medium')
ax.set_xlabel('Time',fontsize=24)
ax.set_ylabel('Rewards',fontsize=24)
legend.get_frame().set_facecolor('#00FFCC')
'''
# GRAFICOS DE BARRA SUCESSO E FAIL:
'''
index = (0,1.175,2.175,3.175)
index3 = (0.35,1.35,2.35,3.35)
index2 = np.arange(K)
action_bar = (S[0],S[1],S[2],S[3])
fail_bar =(F[0],F[1],F[2],F[3])
print(arm_vector)
arm_bar = (arm_vector[0],arm_vector[1],arm_vector[2],arm_vector[3])
bar_width = 0.35
rects1 = ax.bar(index,action_bar, bar_width,alpha=0.4,
color='b',
label='Sucess')
rects1 = ax.bar(index,fail_bar, bar_width,alpha=0.4,
color='r',
label='Fail')
rects1 = ax.bar(index3,arm_bar, bar_width,
color='g',
label='MAB-arm')
ax.set_xticks(index2 + bar_width / 2)
ax.set_xticklabels((' A ', ' B ', ' C ', ' D '))
fig.tight_layout()
ax.set_xlabel('Arms')
ax.set_ylabel('Number of choices')
ax.legend()
'''
# GRAFICO DE BARRAS DE SOMA DE RECOMPENSAS:
'''
index = (0.175,1.175,2.175,3.175,4.175)
index2 = np.arange(K+2)
index3 = (0.35,1.52,2.52,3.52,4.52)
error_config = {'ecolor': '0.3'}
std_ep = (100*0.1711,100*0.132076225107127,100*0.04248582539982974,100*0.03749757081166364,100*0.03670655740239766)
#a1 = eplsion_trials(81, 0.0289,100)
#a2 = eplsion_trials(81,0.1413,100)
#a3 = eplsion_trials(81, 0.4103,100)
#a4 = eplsion_trials(81, 0.8137,100)
#a5 = eplsion_trials(81,0.9250,100)
###
a1=27
a2=64
a3=61
a4=37
a5=29
eplison_bar = (27, 64, 61, 37, 29)
TS_bar = (total_reward,total_reward,total_reward,total_reward,total_reward)
vear_bar = (eplsion_var(21, 0.0289,82),eplsion_var(21,0.1413,82),eplsion_var(21,0.4103,82),eplsion_var(21,0.8137,82),eplsion_var(21,0.9250,82))
print("desvio padrao=",vear_bar)
bar_width = 0.35
print("vetor de media=",eplison_bar)
rects1 = ax.bar(index,eplison_bar, bar_width,alpha=0.8,
color='g',yerr=std_ep,error_kw=error_config,label=r'$\epsilon$')
#rects2 = ax.bar(index3,TS_bar, bar_width,alpha=0.35,
# color='r',
#label='Thompson sampling')
ax.plot([0.175,1.175,2.175,3.175,4.175], [a1,a2,a3,a4,a5], 'o')
ax.set_xticks(index2 + bar_width / 2)
ax.set_xticklabels((' 0.0289 ', ' 0.1413 ', ' 0.4103 ', ' 0.8137 ' , ' 0.925 '))
fig.tight_layout()
ax.set_xlabel(r'$\epsilon$',fontsize=24)
ax.set_ylabel('Average Reward',fontsize=24)
ax.legend(fontsize=25)
'''
# GRAFICO DE PROBABILIDADE DO THOMPSON E GREEDY:
#fig,ax =plt. subplots()
'''
prob_distr_0
ax.plot(time,prob_distr_0, 'r', label='Probability arm 0')
ax.plot(time, prob_distr_1,'g', label='Probability arm 1')
ax.plot(time, prob_distr_2,'b', label='Probability arm 2')
ax.plot(time, prob_distr_3,'k', label='Probability arm 3')
legend = ax.legend(loc='center right', shadow=True, fontsize='x-large')
ax.set_xlabel('Steps',fontsize=24)
ax.set_ylabel('Probability',fontsize=24)
legend.get_frame().set_facecolor('#00FFCC')
#plt.title("Thompson sampling Probability")
'''
####### ATIVANDO GRAFICOS ##########
#plt.grid(True)
#plt.title("Thompson sampling vs MAB")
plt.show()
###### ATIVANDO GRAFICO DINAMICO #######
'''
graph3 = gdisplay(x=1000, y=100, width=400, height=300,
title='Phase Space', xtitle='angle (rad)', ytitle='omega (rad/s)',
foreground=color.black, background=color.white)
curve3 = gcurve(gdisplay = graph3, color = color.blue)
#curve3.plot(pos = (time, theta))
'''
| lucassolanoc/UFRN | Roboteam/E-greedy/Algoritmos/ThompsonLearning.py | ThompsonLearning.py | py | 14,821 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "scipy.special.gamma",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "math... |
30890389119 | """Draft List application."""
import requests, random
import pdb, os
from flask import Flask, request, render_template, redirect, flash, session, jsonify
from flask_debugtoolbar import DebugToolbarExtension
from models import db, connect_db, User, List, PlayerList, Player
from forms import RegisterForm, LoginForm, ComparePlayerForm, ListForm
from sqlalchemy.sql.functions import ReturnTypeFromArgs
class unaccent(ReturnTypeFromArgs):
    """SQL function expression used for accent-insensitive name matching.

    Renders as unaccent(<expr>) in queries; presumably backed by
    PostgreSQL's `unaccent` extension — confirm it is installed on the
    target database or these filters will fail at query time.
    """
    pass
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get(
"DATABASE_URL", 'postgresql:///fantasy-listdb')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True
app.config['SECRET_KEY'] = "iliketrucks12345"
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
debug = DebugToolbarExtension(app)
# connect_db needs to be commented out for testing. So that when the app is initiallized the configuration changes for testing will be read.
connect_db(app)
def run_login(user):
    """Log *user* in by storing their id and username in the Flask session."""
    session["USER_ID"], session["USERNAME"] = user.id, user.username
def make_dbplayers(players):
    """Create Player rows from API result dicts and add them to the session.

    *players* is a list of dicts as returned by the nba-stats-db API.
    Rows are only ADDED here — the caller is responsible for committing.
    """
    for player in players:
        # map API JSON field names onto the Player model's columns
        dbplayer = Player(
            name=player['player_name'],
            team=player['team'],
            points=player['PTS'],
            assists=player['AST'],
            blocks=player['BLK'],
            field_goal_percent=player['field_percent'],
            three_percent=player['three_percent'],
            minutes_played=player['minutes_played'])
        db.session.add(dbplayer)
def add_player_data():
    """Retrieve the API data (player stats) and add all the player stats
    to the local db. Called manually (via data_check) to seed the players
    table.

    The API paginates ~100 players per response; the 'next' URL is
    followed until exhausted, then everything is committed at once.
    NOTE(review): there is no HTTP error handling (no raise_for_status),
    so a failed request surfaces as a JSON decode error or KeyError.
    """
    r = requests.get(f"https://nba-stats-db.herokuapp.com/api/playerdata/season/2023")
    resp = r.json()
    players = resp['results']
    make_dbplayers(players)
    # resp contains 100 players per response; walk the pagination chain
    while resp['next']:
        r = requests.get(resp['next'])
        resp = r.json()
        players = resp['results']
        make_dbplayers(players)
    db.session.commit()
    return
def data_check():
    """Seed the players table from the stats API if it is currently empty."""
    if Player.query.count() == 0:
        add_player_data()
data_check()
def authorization_check():
    """Return True when a user is logged in (USER_ID present in the session).

    Always returns a bool; the original returned True or (implicitly) None,
    and compared against None with `==` instead of the idiomatic `is`.
    Callers only use the result in boolean context, so this is compatible.
    """
    return session.get("USER_ID") is not None
def get_random(num_records, model):
    """Return up to *num_records* randomly selected rows of *model*."""
    all_ids = [row.id for row in model.query.with_entities(model.id).all()]
    # never ask sample() for more ids than actually exist
    chosen_ids = random.sample(all_ids, min(num_records, len(all_ids)))
    return model.query.filter(model.id.in_(chosen_ids)).all()
####################### general user routes ###########################
@app.route("/")
def show_home():
    """Render the landing page with six randomly chosen draft lists."""
    return render_template("home.html", lists=get_random(6, List))
@app.route('/register', methods=["POST", "GET"])
def register_new_user():
    """Show the registration form; on valid submit create the user,
    log them in, and redirect to their profile page.

    User.register presumably hashes the password before storage — the
    hashing lives on the model, not here.
    """
    form = RegisterForm()
    if form.validate_on_submit():
        username = form.username.data
        first_name = form.first_name.data
        last_name = form.last_name.data
        password = form.password.data
        email = form.email.data
        new_user = User.register(username=username,
                                 first_name=first_name,
                                 last_name=last_name,
                                 password=password,
                                 email=email)
        db.session.add(new_user)
        db.session.commit()
        # auto-login the freshly created account
        run_login(new_user)
        return redirect(f"/user/{new_user.id}/details")
    return render_template('register.html', form=form)
@app.route("/user/<int:id>/details")
def show_user_profile(id):
    """Show a user's profile with their draft lists (login required).

    NOTE(review): only checks that SOME user is logged in — any
    authenticated user can view any profile id. Confirm that is intended.
    """
    if authorization_check():
        user = User.query.get_or_404(id)
        lists = user.lists
        return render_template("user-profile.html", user=user, lists=lists)
    else:
        flash("Invalid Credentials", "danger")
        return redirect("/")
@app.route("/logout")
def logout():
    """Log the current user out by removing their info from the session."""
    # pop with a default so visiting /logout while not logged in no longer
    # raises KeyError (the original crashed with a 500 in that case)
    session.pop("USERNAME", None)
    session.pop("USER_ID", None)
    return redirect("/")
@app.route("/login", methods=["GET", "POST"])
def login():
    """Show the login form; on valid credentials store the user in the
    session and redirect to their profile, otherwise flash an error."""
    form = LoginForm()
    if form.validate_on_submit():
        username = form.username.data
        password = form.password.data
        # User.authenticate returns the user on success, falsy on failure
        user = User.authenticate(username, password)
        if user:
            flash(f"Welcome back {user.first_name}!", "success")
            run_login(user)
            return redirect(f"/user/{user.id}/details")
        else:
            flash("Invalid Username/Password", "danger")
            return redirect("/login")
    return render_template("login.html", form=form)
################## player search and comparison routes #########################
@app.route("/player/search")
def get_player_dbdata():
    """Look up two players by (partial, accent-insensitive) name and return
    their stats as JSON: {"player1": {...}, "player2": {...}} with 201.

    Returns a 404 JSON error when either name has no match — previously
    .first() returning None crashed with AttributeError (HTTP 500).
    """
    p1 = request.args['player1']
    p2 = request.args['player2']
    player1 = Player.query.filter(unaccent(Player.name).ilike(f"%{p1}%")).first()
    player2 = Player.query.filter(unaccent(Player.name).ilike(f"%{p2}%")).first()
    if player1 is None or player2 is None:
        return (jsonify(error="player not found"), 404)
    response_json = jsonify(player1=player1.serialize(), player2=player2.serialize())
    return ((response_json, 201))
@app.route("/player/details")
def show_player_search_details():
    """Handle user search (?q=<name>) for players and display their stats.

    With no query string, six random players are shown instead. Matching
    is partial and accent-insensitive via the unaccent() SQL function.
    """
    if not authorization_check():
        flash("restricted access please login", "danger")
        return redirect('/login')
    if request.args.get("q"):
        player_search = request.args.get("q")
        players = Player.query.filter(unaccent(Player.name).ilike(f"%{player_search}%")).all()
        if players == []:
            flash("No data for player. Check spelling.", "danger")
            return redirect('/player/details')
        return render_template("players.html", players=players)
    else:
        # no search term: show a random sample as a starting point
        players = get_random(6, Player)
        return render_template("players.html", players=players)
@app.route("/player/comparison", methods=["POST", "GET"])
def compare_players():
    """Page for comparing player statistics and creating a draft list.

    Hosts two forms: form1 (player comparison, handled client-side via
    /player/search) and form2 (draft-list creation, handled here). On a
    valid form2 submit, each selected name is resolved back to a Player id
    and a List row is created for the logged-in user.

    NOTE(review): the .first() lookups return None for an unmatched name,
    which would make pg[0] etc. raise TypeError. Choices are populated from
    the Player table so this should not normally happen — confirm.
    """
    if not authorization_check():
        flash("restricted access please login", "danger")
        return redirect('/login')
    all_players_names = Player.query.order_by(Player.name)
    # de-duplicate names, then sort for the dropdowns
    form_choices = sorted(set([player.name for player in all_players_names]))
    form1 = ComparePlayerForm()
    form2 = ListForm()
    form1.player1.choices = form_choices
    form1.player2.choices = form_choices
    if form2.validate_on_submit():
        # resolve each position's selected name back to a Player id
        pg = db.session.query(Player.id).filter(Player.name.ilike(f"{form2.point_guard.data}")).first()
        sg = db.session.query(Player.id).filter(Player.name.ilike(f"{form2.strong_guard.data}")).first()
        sf = db.session.query(Player.id).filter(Player.name.ilike(f"{form2.small_forward.data}")).first()
        pf = db.session.query(Player.id).filter(Player.name.ilike(f"{form2.power_forward.data}")).first()
        c = db.session.query(Player.id).filter(Player.name.ilike(f"{form2.center.data}")).first()
        name = form2.name.data
        pg_id = pg[0]
        sg_id = sg[0]
        sf_id = sf[0]
        pf_id = pf[0]
        c_id = c[0]
        user_id = session["USER_ID"]
        list = List(name=name, pg_id=pg_id, sg_id=sg_id, sf_id=sf_id, pf_id=pf_id, c_id=c_id, user_id=user_id)
        db.session.add(list)
        db.session.commit()
        # populate the association table for this list's players
        list.add_to_playerlists()
        return redirect(f"/user/{user_id}/details")
    return render_template("compare.html", form1=form1, form2=form2)
####################### list routes ############################
@app.route("/list/<int:id>/delete")
def delete_draftlist(id):
    """Delete the draft list with the given id (login required).

    NOTE(review): there is no ownership check — ANY logged-in user can
    delete ANY list by guessing its id (IDOR). The flash message below
    also reads truncated ("Unauthorized please"). Both worth fixing.
    """
    if authorization_check():
        List.query.filter_by(id=id).delete()
        db.session.commit()
        return redirect(f'/user/{session["USER_ID"]}/details')
    else:
        flash("Unauthorized please", "danger")
        return redirect("/login")
| cshellen1/fantasy-draft-list | app.py | app.py | py | 9,165 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlalchemy.sql.functions.ReturnTypeFromArgs",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name":... |
5866244481 | from pip import main
import numpy as np
import pandas as pd
import sklearn
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.preprocessing import FunctionTransformer, LabelEncoder, Normalizer, StandardScaler, OneHotEncoder
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin, clone
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score,train_test_split
from scipy import stats
from sklearn.linear_model import LinearRegression
from scipy.special import boxcox1p
import csv
from scipy.stats import norm, skew #for some statistics
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
def remove_outliers(dataset, threshold, columns=None, removed = False):
if columns==None:
numerics = ['int64','float64']
columns = dataset.select_dtypes(include=numerics).columns
tmp = dataset.copy()
z = np.abs(stats.zscore(tmp[columns]))
outliers = [row.any() for row in (z > threshold)]
outliers_idxs = tmp.index[outliers].tolist()
print("Number of removed rows = {}".format(len(outliers_idxs)))
if removed: return dataset.drop(outliers_idxs), tmp.loc[outliers]
else: return dataset.drop(outliers_idxs)
def convert_to_string(df, columns):
    """Cast the given ``columns`` of ``df`` to ``str`` dtype, in place.

    Returns the same DataFrame so calls can be chained.
    """
    converted = df[columns].astype(str)
    df[columns] = converted
    return df
def none_transform(df):
    """Fill missing categorical values with column-specific default strings.

    Every listed column gets its own filler (e.g. "NA" for basement quality,
    "Norm" for the condition columns).  Returns the mutated DataFrame.
    """
    fillers = {
        "Mas_Vnr_Type": "None",
        "Bsmt_Qual": "NA",
        "Electrical": "SBrkr",
        "Bsmt_Cond": "TA",
        "Bsmt_Exposure": "No",
        "BsmtFin_Type_1": "No",
        "BsmtFin_Type_2": "No",
        "Central_Air": "N",
        "Condition_1": "Norm",
        "Condition_2": "Norm",
        "Exter_Cond": "TA",
        "Exter_Qual": "TA",
        "Fireplace_Qu": "NA",
        "Functional": "Typ",
        "Garage_Type": "No",
        "Garage_Finish": "No",
        "Garage_Qual": "No",
        "Garage_Cond": "No",
        "Heating_QC": "TA",
        "Kitchen_Qual": "TA",
        "MS_Zoning": "None",
        "Exterior_1st": "VinylSd",
        "Exterior_2nd": "VinylSd",
        "Sale_Type": "WD",
    }
    for column, filler in fillers.items():
        df.loc[:, column] = df.loc[:, column].fillna(filler)
    return df
def preprocess(df,deleteTag=False,tag="train"):
    """Clean an Ames-housing frame: drop columns, winsorize, encode ordinals.

    Args:
        df: raw frame; must contain "PID" and "Sale_Price".
        deleteTag: when True, drop large-but-cheap outlier houses.
        tag: "train" or "test" -- see the NOTE below; it has no net effect.

    Returns:
        (features, target): transformed DataFrame and the Sale_Price Series.
    """
    # data_df = df.drop(["Street", "Utilities"], axis=1)
    data_df = df.drop("PID", axis=1)  # PID is a row identifier, not a feature
    # print('data shape:',data_df.shape)
    # exit()
    if tag == 'train':
        train_y = data_df["Sale_Price"]
    else:
        train_y = None
    # NOTE(review): train_y is unconditionally reassigned below (after
    # winsorizing), so this tag branch is dead code -- and even the "test"
    # path still requires a Sale_Price column.  Confirm the intent.
    if deleteTag:
        # print('Will Delete Rows')
        # Drop big houses sold suspiciously cheap (the classic Ames outliers).
        clear_data = data_df.drop(data_df[(data_df['Gr_Liv_Area']>4000) & (data_df['Sale_Price']<300000)].index)
        # exit()
    else:
        clear_data = data_df
    ### drop columns below ###
    '''
    ['Street',
    'Utilities',
    'Condition_2',
    'Roof_Matl',
    'Heating',
    'Pool_QC',
    'Misc_Feature',
    'Low_Qual_Fin_SF',
    'Pool_Area',
    'Longitude',
    'Latitude']
    '''
    # Low-information / near-constant columns removed from the feature set.
    cols = ['Street',
            'Utilities',
            'Condition_2',
            'Roof_Matl',
            'Heating',
            'Pool_QC',
            'Misc_Feature',
            'Low_Qual_Fin_SF',
            'Pool_Area',
            'Longitude',
            'Latitude']
    for col in cols:
        # print(f'col:{col}')
        clear_data = clear_data.drop(col, axis=1)
    # print(f'clear_data:{clear_data.shape}')
    # exit()
    # Skewed numeric columns: cap the top 5% (one-sided winsorize below).
    win_cols = ["Lot_Frontage",
                "Lot_Area",
                "Mas_Vnr_Area",
                "BsmtFin_SF_2",
                "Bsmt_Unf_SF",
                "Total_Bsmt_SF",
                "Second_Flr_SF",
                'First_Flr_SF',
                "Gr_Liv_Area",
                "Garage_Area",
                "Wood_Deck_SF",
                "Open_Porch_SF",
                "Enclosed_Porch",
                "Three_season_porch",
                "Screen_Porch",
                "Misc_Val"]
    for col in win_cols:
        clear_data[col] = scipy.stats.mstats.winsorize(clear_data[col],limits=[0.0, 0.05])
    # print(f'clear_data:{clear_data.shape}')
    # exit()
    # Target extracted here (overwrites the earlier tag-dependent value).
    train_y = clear_data["Sale_Price"]
    clear_data.drop(['Sale_Price'], axis=1, inplace=True)
    # Impute missing garage years with the neighborhood median.
    clear_data["Garage_Yr_Blt"] = clear_data.groupby('Neighborhood')["Garage_Yr_Blt"].transform(lambda x: x.fillna(x.median()))
    # enc = OneHotEncoder(handle_unknown='ignore')
    # clear_data = enc.fit_transform(clear_data)
    # print(f'clear_data:{clear_data.shape}')
    # print(f'clear_data:{clear_data}')
    # exit()
    # hot_one_features = clear_data
    columns = clear_data.select_dtypes(include='object').columns.array
    num_columns = clear_data.select_dtypes(include='number').columns.array
    # for col in columns:
    #     clear_data[col] = clear_data[col].astype("category")
    # print(columns)
    # cache = clear_data['Paved_Drive'].unique()
    # print('\n')
    # print('col &unique:',cache)
    # exit()
    # Ordinal encodings: map quality/condition labels to integer ranks.
    # NOTE(review): replace(..., inplace=True) returns None, so each wrapping
    # pd.to_numeric(...) call operates on None and its result is discarded.
    # The in-place replacements DO apply; only the numeric cast is a no-op.
    pd.to_numeric(clear_data['Lot_Shape'].replace({'Irregular':1,'Moderately_Irregular':2,'Slightly_Irregular':3,'Regular':4}, inplace=True))
    # NOTE(review): Gentle_slope and Severe Slope both map to 3 -- likely a typo.
    pd.to_numeric(clear_data['Land_Slope'].replace({'Gentle_slope':3,'Moderate Slope':2,'Severe Slope':3}, inplace=True))
    pd.to_numeric(clear_data['Overall_Cond'].replace({'Very_Excellent':10,'Excellent':9,'Very_Good':8,'Good':7,'Above Average':6,
                    'Average':5,'Below_Average':4,'Fair':3,'Poor':2,'Very_Poor':1}, inplace=True))
    pd.to_numeric(clear_data['Overall_Qual'].replace({'Very_Excellent':10,'Excellent':9,'Very_Good':8,'Good':7,'Above Average':6,
                    'Average':5,'Below_Average':4,'Fair':3,'Poor':2,'Very_Poor':1}, inplace=True))
    pd.to_numeric(clear_data['Exter_Qual'].replace({'Excellent':5,'Good':4,'Typical':3,'Fair':2,'Poor':1}, inplace=True))
    pd.to_numeric(clear_data['Exter_Cond'].replace({'Excellent':5,'Good':4,'Typical':3,'Fair':2,'Poor':1}, inplace=True))
    pd.to_numeric(clear_data['Bsmt_Qual'].replace({'Excellent':6,'Good':5,'Typical':4,'Fair':3,'Poor':2,'No_Basement':1}, inplace=True))
    pd.to_numeric(clear_data['Bsmt_Cond'].replace({'Excellent':6,'Good':5,'Typical':4,'Fair':3,'Poor':2,'No_Basement':1}, inplace=True))
    pd.to_numeric(clear_data['Bsmt_Exposure'].replace({'Gd':5,'Av':4,'Mn':3,'No':2,'No_Basement':1}, inplace=True))
    pd.to_numeric(clear_data['BsmtFin_Type_1'].replace({'GLQ':7,'ALQ':6,'BLQ':5,'Rec':4,'LwQ':3,'Unf':2,'NA':1}, inplace=True))
    pd.to_numeric(clear_data['BsmtFin_Type_2'].replace({'GLQ':7,'ALQ':6,'BLQ':5,'Rec':4,'LwQ':3,'Unf':2,'NA':1}, inplace=True))
    pd.to_numeric(clear_data['Heating_QC'].replace({'Excellent':5,'Good':4,'Typical':3,'Fair':2,'Poor':1}, inplace=True))
    pd.to_numeric(clear_data['Electrical'].replace({'SBrkr':5,'FuseA':4,'FuseF':3,'FuseP':2,'Mix':1}, inplace=True))
    pd.to_numeric(clear_data['Kitchen_Qual'].replace({'Excellent':5,'Good':4,'Typical':3,'Fair':2,'Poor':1}, inplace=True))
    pd.to_numeric(clear_data['Functional'].replace({'Typ':8,'Min1':7,'Min2':6,'Mod':5,'Maj1':4,'Maj2':3,'Sev':2,'Sal':1}, inplace=True))
    # NOTE(review): this second 'Functional' replace uses fireplace-quality
    # labels ('No_Fireplace') -- it was most likely meant for 'Fireplace_Qu'.
    pd.to_numeric(clear_data['Functional'].replace({'Excellent':6,'Good':5,'Typical':4,'Fair':3,'Poor':2,'No_Fireplace':1}, inplace=True))
    pd.to_numeric(clear_data['Garage_Finish'].replace({'Fin':4,'RFn':3,'Unf':2,'NA':1}, inplace=True))
    pd.to_numeric(clear_data['Garage_Qual'].replace({'Excellent':6,'Good':5,'Typical':4,'Fair':3,'Poor':2,'No_Garage':1}, inplace=True))
    pd.to_numeric(clear_data['Paved_Drive'].replace({'Paved':3,'Partial_Pavement':2,'Dirt_Gravel':1}, inplace=True))
    # exit()
    # for col in columns:
    #     cache = clear_data[col].unique()
    #     print('\n')
    #     print('col &unique:',col)
    #     print('col &unique:',cache)
    # exit()
    # print(clear_data['Heating_QC'].head(5))
    # print(clear_data['Central_Air'].head(5))
    # print(f'columns:{columns}')
    # print(f'num_columns:{num_columns}')
    # print(f'clear data shape:{clear_data}')
    # exit()
    # print('clear_data:',clear_data['Exter_Qual'])
    # for c in columns:
    #     # enc = OneHotEncoder(handle_unknown='ignore',sparse=False)
    #     lbl = LabelEncoder()
    #     lbl.fit(list(clear_data[c].values))
    #     clear_data[c] = lbl.transform(list(clear_data[c].values))
    #     clear_data[c] = enc.fit_transform(np.array(clear_data[c]).reshape(-1,1))
    # pd.to_numeric(scaled_X_test['Pool_QC'].replace({'Excellent':5,'Good':4,'Typical':3,'Fair':2,'Poor':1,'No_Pool':0}, inplace=True))
    hot_one_features = (clear_data)
    # hot_one_features = pd.get_dummies(clear_data)
    # print(hot_one_features)
    return hot_one_features,train_y
def preprocess_pipline(df,deleteTag=False,tag="train"):
    """Run ``preprocess`` and shape its outputs for train vs. test data.

    Args:
        df: raw input frame.
        deleteTag: forwarded to ``preprocess`` (outlier-row-dropping switch).
        tag: "train" to return the target alongside features, anything else
            for test data (target slot is None).

    Returns:
        (features, target) for training data, (features, None) otherwise.
    """
    if tag == "train":
        data_df = df
        re_data, train_y = preprocess(data_df, deleteTag, tag)
        return re_data, train_y
    # BUG FIX: the original dropped ``tag`` here, so preprocess always ran
    # in its default "train" mode even for test data.
    re_data, _ = preprocess(df, deleteTag, tag)
    return re_data, None
if __name__ == '__main__':
    # Entry point: load the pre-split Ames housing CSVs and run the
    # preprocessing pipeline on the train and test frames.
    train_data = pd.read_csv('train1.csv')
    test_data = pd.read_csv('test1.csv')
    test_y = pd.read_csv('test_y1.csv')
    print('Load data is done!')
    # re_train,re_y = preprocess_pipline(train_data,deleteTag = True,tag = "train")
    # re_test,_ = preprocess_pipline(test_data,deleteTag = True,tag = "test")
    # deleteTag=False keeps the Gr_Liv_Area outlier rows; flip to True to
    # reproduce the commented-out runs above.
    re_train_,re_y_ = preprocess_pipline(train_data,deleteTag = False,tag = "train")
    re_test_,_ = preprocess_pipline(test_data,deleteTag = False,tag = "test")
    # print(f'train:{re_train.shape}')
    # print(f'y:{re_y.shape}')
    # print(f'test:{re_test.shape}')
    # Report the resulting shapes as a sanity check.
    print(f're_train:{re_train_.shape}')
    print(f're_y:{re_y_.shape}')
    print(f're_test:{re_test_.shape}')
| woshicqy/CS598PSL_Project1 | mymain_test.py | mymain_test.py | py | 10,484 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.abs",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "scipy.stats.zscore",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "scipy.stats.mstats.winsorize... |
16965598918 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on %(date)s
@author: Yuanyuan Shi
"""
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import mean_squared_error
# Power outage class
def f(row):
    """Categorize a day's outage count.

    Returns 2 (extreme) for more than 8 outages, 1 (bad) for 3-8,
    and 0 (normal) for 0-2.
    """
    count = row['Total_outages']
    if count > 8:
        return 2
    if count > 2:
        return 1
    return 0
# Load data function: load data for neural network training
# Input: None
# Output: x_train, y_train, x_test, y_test
def load_data():
    """Load the weather/outage CSV and return normalized train/test splits.

    Returns:
        (x_train, y_train, x_test, y_test): feature matrices are row-wise
        normalized arrays; labels are the 0/1/2 outage categories from f().
    """
    feature_cols = ['Day_length_hr', 'Avg_Temp_F', 'Avg_humidity_percent',
                    'Avg_windspeed_mph', 'Max_windspeed_mph',
                    'Precipitation_in', 'Event_thunderstorm']
    data = pd.read_csv('../../Data/WeatherOutagesAllJerry.csv')
    data = data.dropna(how='all')
    # Derive the target class per row (see f() above).
    data['category'] = data.apply(f, axis=1)
    # Hold out 10% for testing with a fixed seed for reproducibility.
    train, test = train_test_split(data, test_size=0.1, random_state=567)
    y_train = train['category']
    y_test = test['category']
    x_train = preprocessing.normalize(train[feature_cols])
    x_test = preprocessing.normalize(test[feature_cols])
    return x_train, y_train, x_test, y_test
# Oversample algoritm
# This function oversample from under-reprented class
# Input: X-feature, y-response, R1-oversample ratio for bad case, R2-oversample ratio for extreme case
# Output: X_resam, y_resam
def balance_sample(X, y, R1, R2):
    """Oversample the under-represented classes 1 ("bad") and 2 ("extreme").

    Class 1 is oversampled against class 0 at ratio ``R1`` and class 2 at
    ratio ``R2``; the resampled class-2 rows are then appended to the
    class-0/1 resample.

    NOTE(review): this relies on the old imbalanced-learn API -- the
    ``ratio=`` keyword and ``fit_sample`` were deprecated and removed in
    later releases (replaced by ``sampling_strategy=`` / ``fit_resample``).
    Pin an old imblearn version or port before reuse.

    Args:
        X: feature matrix.
        y: class labels (0/1/2).
        R1: oversample ratio for the "bad" class (1).
        R2: oversample ratio for the "extreme" class (2).

    Returns:
        (X_resam, y_resam): the rebalanced features and labels.
    """
    from imblearn.over_sampling import RandomOverSampler
    # Apply the random over-sampling
    ros = RandomOverSampler(ratio=R1,random_state=6)
    x_res, y_res = ros.fit_sample(X[y!=2], y[y!=2])
    ros2 = RandomOverSampler(ratio=R2,random_state=6)
    x_res2, y_res2 = ros2.fit_sample(X[y!=1], y[y!=1])
    X_resam = np.concatenate((x_res,x_res2[y_res2==2]), axis=0)
    y_resam = np.concatenate((y_res, y_res2[y_res2==2]),axis=0)
    return X_resam, y_resam
def neural_network_clf(x_train, y_train, x_test, y_test):
    """Fit a small MLP classifier and print the train/test MSE."""
    model = MLPClassifier(max_iter=1000, activation='identity', solver='lbfgs',
                          alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1)
    model.fit(x_train, y_train)
    train_pred = model.predict(x_train)
    test_pred = model.predict(x_test)
    print("Train error for normalized data", mean_squared_error(y_train, train_pred))
    print("Test error for normalized data", mean_squared_error(y_test, test_pred))
| Yuanyuan-Shi/PowerOutagePredictor | PowerOutagePredictor/NeuralNetwork/nn_sandbox.py | nn_sandbox.py | py | 2,878 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.normalize",
"line_number": 48,
"usage_type": "call"
},
... |
23983268226 | import pygame
from MENU.Application import Application
class Versus(Application):
    """Versus-mode setup menu: pick game mode (1v1 / vs AI) and game type."""

    def __init__(self, game):
        Application.__init__(self, game)
        # self.state tracks which menu entry the cursor currently points at.
        self.state = "1 VS 1"
        # Screen positions for each selectable text entry.
        self.mode1x, self.mode1y = self.midw + 100, self.midh - 100
        self.mode2x, self.mode2y = self.midw + 500, self.midh - 100
        self.pointsx, self.pointsy = self.midw + 100, self.midh + 40
        self.timex, self.timey = self.midw + 500, self.midh + 40
        self.nextx, self.nexty = self.midw, self.midh + 200
        # Background image covering the entire screen.
        # NOTE(review): '\B' is not an escape so this literal survives, but a
        # raw string or os.path.join would be safer for the path.
        self.back = pygame.image.load('MENU\Back_Menu.png')
        # Default players: [label, character, input device]; index 1 is the
        # opponent and is overwritten by check_input().
        self.game.player_parameters = [["PLAYER1","Sanic", "keyboard1"],
                                       ["0COM2", "Alexander", "keyboard2"]]
        # Default game parameters: [win condition, arena].
        self.game.game_parameters = ["score_3", "metal1"]

    def display_menu(self):
        """Event/draw loop: render all menu texts and the cursor each frame."""
        self.run_display = True
        while self.run_display:
            self.game.check_events()
            self.check_input()
            self.game.background(self.back)
            self.game.draw_text_2('Versus', 50, 250, 100)
            self.game.draw_text("1 VS 1", 40, self.mode1x, self.mode1y)
            self.game.draw_text("1 VS AI", 40, self.mode2x, self.mode2y)
            self.game.draw_text("Points", 40, self.pointsx, self.pointsy)
            self.game.draw_text("Time", 40, self.timex, self.timey)
            self.game.draw_text_3("Game Mode", 40, self.midw - 400, self.mode1y)
            self.game.draw_text_3("Type of Game", 40, self.midw - 400, self.pointsy)
            self.game.draw_text_2("NEXT", 40, self.nextx, self.nexty + 25)
            self.draw_cursor()
            self.blit_screen()

    def move_cursor(self):
        """Cycle the cursor through the entries with UP/DOWN; BACK returns to the main menu."""
        if self.game.DOWN_KEY:
            self.play_sfx(r'MENU\button-3.wav')
            # DOWN walks forward through the entry cycle.
            if self.state == '1 VS 1':
                self.cursor_rect.midtop = (self.mode2x + self.offset, self.mode2y)
                self.state = '1 VS AI'
            elif self.state == '1 VS AI':
                self.cursor_rect.midtop = (self.pointsx + self.offset, self.pointsy)
                self.state = 'Points'
            elif self.state == 'Points':
                self.cursor_rect.midtop = (self.timex + self.offset, self.timey)
                self.state = 'Time'
            elif self.state == 'Time':
                self.cursor_rect.midtop = (self.nextx + self.offset, self.nexty)
                self.state = 'NEXT'
            elif self.state == 'NEXT':
                self.cursor_rect.midtop = (self.mode1x + self.offset, self.mode1y)
                self.state = '1 VS 1'
        elif self.game.UP_KEY:
            self.play_sfx(r'MENU\button-3.wav')
            # UP walks backward through the same cycle.
            if self.state == '1 VS 1':
                self.cursor_rect.midtop = (self.nextx + self.offset, self.nexty)
                self.state = 'NEXT'
            elif self.state == '1 VS AI':
                self.cursor_rect.midtop = (self.mode1x + self.offset, self.mode1y)
                self.state = '1 VS 1'
            elif self.state == 'Points':
                self.cursor_rect.midtop = (self.mode2x + self.offset, self.mode2y)
                self.state = '1 VS AI'
            elif self.state == 'Time':
                self.cursor_rect.midtop = (self.pointsx + self.offset, self.pointsy)
                self.state = 'Points'
            elif self.state == 'NEXT':
                self.cursor_rect.midtop = (self.timex + self.offset, self.timey)
                self.state = 'Time'
        elif self.game.BACK_KEY:
            # BACK leaves this menu and returns to the main menu.
            self.game.curr_menu = self.game.menu
            self.play_sfx(r'MENU\book-cover-close-01.wav')
            self.run_display = False

    def check_input(self):
        """Apply the entry under the cursor on START; NEXT advances to the selection screen."""
        self.move_cursor()
        if self.game.START_KEY:
            self.play_sfx(r'MENU\button-3.wav')
            if self.state == '1 VS 1':
                self.game.player_parameters[1][0] = "PLAYER2"
            elif self.state == '1 VS AI':
                self.game.player_parameters[1][0] = "0COM2"
            elif self.state == 'Points':
                self.game.game_parameters[0] = "score_3"
            elif self.state == 'Time':
                self.game.game_parameters[0] = "time_90"
            elif self.state == 'NEXT':
                self.game.curr_menu = self.game.selection
                self.run_display = False
| SlyLeoX/Cyber-Puck | Cyberpuck_ReleaseDirectory/MENU/Versus.py | Versus.py | py | 4,897 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "MENU.Application.Application",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "MENU.Application.Application.__init__",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "MENU.Application.Application",
"line_number": 6,
"usage_type": "name"
... |
import cv2

# Load both aircraft images and normalize them to a common 512x512 size so
# they can be compared pixel-by-pixel.
img = cv2.imread("aircraft.jpg")
img = cv2.resize(img, (512, 512))
img1 = cv2.imread("aircraft1.jpg")
img1 = cv2.resize(img1, (512, 512))
img1 = cv2.medianBlur(img1, 101)
# NOTE(review): img2 is prepared but never used below.
img2 = cv2.imread("polygons.png")
img2 = cv2.medianBlur(img2, 25)
img2 = cv2.resize(img2, (512, 512))
if img.shape == img1.shape:
    print("Same Size...")
else:
    print("Not Same...")
# Per-channel difference: a non-zero pixel marks a spot where the images differ.
diff = cv2.subtract(img, img1)
b, g, r = cv2.split(diff)
# print(b, g, r)
q0 = cv2.countNonZero(b)
q1 = cv2.countNonZero(g)
q2 = cv2.countNonZero(r)
# BUG FIX: the middle channel is green, not "gray" as the original printed.
print("blue:", q0, "\n", "green:", q1, "\n", "red:", q2)
cv2.imshow("difference", diff)
cv2.waitKey(0)
cv2.destroyAllWindows()
| REISOGLU53/OpenCV-Python | 06_Alistirma/09_Resim_Karsilastirma.py | 09_Resim_Karsilastirma.py | py | 685 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 7,
... |
72521554913 | """
utils.py
Manejo de ciertos utilitarios como:
- Cargado de configuración
"""
import configparser
import os
import logging
import hashlib
from logging.handlers import RotatingFileHandler
#SETTINGS_FILENAME = 'settings.ini'
# Resolve both paths relative to the project root (two levels up from this
# module), not the /app directory.
SETTINGS_FILENAME = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..', '.settings.ini'))
LOGS_DIRECTORY = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..', 'logs'))
def isSettingsFileThere(filename=SETTINGS_FILENAME) -> bool:
    """Check whether the settings file exists.

    Args:
        filename (str, optional): Path of the settings file.
            Defaults to SETTINGS_FILENAME.

    Returns:
        bool: True when the file exists; False otherwise (the failure is
        logged, nothing is raised to the caller).
    """
    # BUG FIX: the original used try/assert as control flow (assert is
    # stripped under -O) and logged an f-string with no placeholder, so the
    # offending path never appeared in the log.
    if os.path.exists(filename):
        return True
    logging.error(f'The settings file located at {filename} does not exist!')
    return False
def isLogsDirectoryThere(filename=LOGS_DIRECTORY) -> bool:
    """Check whether the logs directory exists.

    Args:
        filename (str, optional): Path of the logs directory.
            Defaults to LOGS_DIRECTORY.

    Returns:
        bool: True when the directory exists; False otherwise (the failure
        is logged, nothing is raised to the caller).
    """
    # BUG FIX: docstring/message previously copy-pasted from the settings
    # checker; also replaced try/assert control flow (stripped under -O)
    # and included the actual path in the log line.
    if os.path.exists(filename):
        return True
    logging.error(f'The logs directory should exist at the following location: {filename}')
    return False
def load_db_config() -> dict:
    """Load the project's database configuration from the settings file.

    Reads the PROD section when GLOBAL/PRODUCTION is true, the TEST section
    otherwise; both share the same key layout.

    Returns:
        dict: connection settings for the active environment, or an empty
        dict when the settings file is missing.
    """
    config = configparser.ConfigParser()
    if not isSettingsFileThere(SETTINGS_FILENAME):
        print('*** ERROR ***, settings file not located!')
        return {}
    config.read(SETTINGS_FILENAME)
    # The two branches of the original differed only in the section name;
    # pick the section once and build the dict a single time (DRY).
    production = config.getboolean('GLOBAL', 'PRODUCTION')
    section = 'DATABASE-PROD' if production else 'DATABASE-TEST'
    return {
        "HOSTNAME": config.get(section, 'DB_HOST'),
        "PORT": config.get(section, 'DB_PORT'),
        "SID": config.get(section, 'DB_SID'),
        "USERNAME": config.get(section, 'DB_USER'),
        "PASSWORD": config.get(section, 'DB_PASSWORD'),
        "DEBUG": config.getboolean('GLOBAL', 'DEBUG'),
        "LOG_ROTATE": config.getboolean('GLOBAL', 'LOG_ROTATE'),
    }
def get_logger_dictConfig(useLogRotate: bool = False) -> dict:
    """Build the logging dictConfig used by uvicorn.

    Args:
        useLogRotate (bool, optional): when True, loggers write to a
            midnight-rotated file in LOGS_DIRECTORY; otherwise to stdout.

    Returns:
        dict: configuration suitable for logging.config.dictConfig().
    """
    # The handler choice was repeated three times in the original; compute
    # it once so the loggers cannot drift out of sync.
    handlers = ["logRotation"] if useLogRotate else ["console"]
    dictConfig = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "default": {
                "format": "%(asctime)s|%(levelname)s|%(module)s - %(lineno)d: %(message)s",
                "datefmt": "'%Y-%m-%d %H:%M:%S'"
            }
        },
        "handlers": {
            "console": {
                "level": logging.DEBUG,
                "class": "logging.StreamHandler",
                "formatter": "default",
                "stream": "ext://sys.stdout",
            },
            "logRotation": {
                "level": logging.DEBUG,
                "class": "logging.handlers.TimedRotatingFileHandler",
                "formatter": "default",
                "filename": os.path.join(LOGS_DIRECTORY, "teleton_smsapi.log"),
                "when": "midnight",
                "interval": 1,
                "encoding": "utf8",
            },
        },
        "loggers": {
            # Verbose logger: everything from DEBUG up.
            "uvicorn.full_logger": {
                "handlers": handlers,
                "level": logging.DEBUG,
                "propagate": False
            },
            # Quiet logger: errors only.
            "uvicorn.discrete_logger": {
                "handlers": handlers,
                "level": logging.ERROR,
                "propagate": False
            }
        },
        "root": {
            "level": logging.DEBUG,
            "handlers": handlers
        }
    }
    return dictConfig
def getLoggerLevel(currentConf: dict) -> str:
    """Return the uvicorn logger name to use for the given configuration.

    Args:
        currentConf (dict): current configuration; must contain 'DEBUG'.

    Returns:
        str: 'uvicorn.full_logger' when DEBUG is truthy, otherwise
        'uvicorn.discrete_logger'.
    """
    # Truthiness instead of "== True" (PEP 8); identical behavior for the
    # bool values produced by load_db_config().
    if currentConf['DEBUG']:
        return 'uvicorn.full_logger'
    return 'uvicorn.discrete_logger'
def get_sha256str(input: str) -> str:
    """Return the hex-encoded SHA-256 digest of a plain-text string.

    Args:
        input (str): text to hash.

    Returns:
        str: the 64-character hexadecimal digest.
    """
    digest = hashlib.sha256()
    digest.update(input.encode('utf-8'))
    return digest.hexdigest()
| jpablochaves/filter_api | app/shared/utils.py | utils.py | py | 5,559 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "os.path.abspath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line... |
26928070232 | import PyQt5.QtCore as QtCore
from imperialism_remake import start
from imperialism_remake.base import constants, network
from imperialism_remake.server import server
def client_connect():
    """
    Client tries to connect.

    Connects the module-level NetworkClient to the local server on the
    default game port (constants.NETWORK_PORT).
    """
    client.connect_to_host(constants.NETWORK_PORT)
def send_shutdown():
    """
    Send shutdown message.

    Pushes a SYSTEM_SHUTDOWN message to the server, then flushes the
    socket so the message is actually sent before the app quits.
    """
    client.send(constants.C.SYSTEM, constants.M.SYSTEM_SHUTDOWN)
    client.socket.flush()
if __name__ == '__main__':
    # Stress-test entry point: boot a server, connect a client, order a
    # shutdown and verify both sides terminate cleanly.
    start.fix_pyqt5_exception_eating()
    start.set_start_directory()
    # create server process and start it
    server_process = server.ServerProcess()
    server_process.start()
    # create app in this process
    app = QtCore.QCoreApplication([])
    client = network.NetworkClient()
    # Staggered one-shot timers: connect at 100ms, request shutdown at
    # 200ms, quit the local event loop at 300ms.
    QtCore.QTimer.singleShot(100, client_connect)
    QtCore.QTimer.singleShot(200, send_shutdown)
    QtCore.QTimer.singleShot(300, app.quit)
    app.exec_()
    # wait for the server process to exit after the shutdown message
    server_process.join()
| sumpfralle/imperialism-remake | test/network_process_start_shutdown_stresstest.py | network_process_start_shutdown_stresstest.py | py | 988 | python | en | code | null | github-code | 1 | [
{
"api_name": "imperialism_remake.base.constants.NETWORK_PORT",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "imperialism_remake.base.constants",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "imperialism_remake.base.constants.C",
"line_number": 16... |
43707222394 | import re
import nltk.tokenize
from six import text_type
from cakechat.utils.text_processing.config import SPECIAL_TOKENS
# Sentence-terminating characters used for capitalization and spacing fixes.
_END_CHARS = '.?!'

# Splits text into word tokens and individual punctuation characters.
_tokenizer = nltk.tokenize.RegexpTokenizer(pattern=u'\w+|[^\w\s]')
def get_tokens_sequence(text, lower=True, check_unicode=True):
    """Tokenize *text* into words and punctuation marks.

    Args:
        text: unicode string to tokenize.
        lower: lower-case the text before tokenizing when True.
        check_unicode: raise TypeError for non-unicode input when True.

    Returns:
        list of token strings (empty for blank input).
    """
    if check_unicode and not isinstance(text, text_type):
        raise TypeError('text object should be unicode type')
    if not text.strip():
        return []
    source = text.lower() if lower else text
    return _tokenizer.tokenize(source)
def replace_out_of_voc_tokens(tokens, tokens_voc):
    """Map every token absent from *tokens_voc* to the unknown-token marker."""
    result = []
    for token in tokens:
        result.append(token if token in tokens_voc else SPECIAL_TOKENS.UNKNOWN_TOKEN)
    return result
def _capitalize_first_chars(text):
    """Upper-case the first letter of the text and of each sentence after . ? !"""
    if not text:
        return text
    # Position 0 plus the first word character after every end-of-sentence
    # mark followed by a space.
    positions = [0]
    for match in re.finditer('[%s] \w' % _END_CHARS, text):
        positions.append(match.end() - 1)
    for pos in positions:
        text = text[:pos] + text[pos].upper() + text[pos + 1:]
    return text
def get_pretty_str_from_tokens_sequence(tokens_sequence):
    """
    Prettify chatbot's answer removing excessive characters and capitalizing first words of sentences.
    Before: "hello world ! nice to meet you , buddy . do you like me ? I ' ve been missing you for a while . . . $$$"
    After: "Hello world! Nice to meet you, buddy. Do you like me? I've been missing you for a while..."
    """
    text = ' '.join(tokens_sequence)
    # Drop the service tokens, then glue apostrophe contractions back together.
    for marker in (SPECIAL_TOKENS.EOS_TOKEN, SPECIAL_TOKENS.START_TOKEN):
        text = text.replace(marker, '')
    text = text.replace(' \' ', '\'')
    # Remove the space the tokenizer inserted before each punctuation mark.
    for punct in set(_END_CHARS) | {','}:
        text = text.replace(' ' + punct, punct)
    text = _capitalize_first_chars(text)
    return text.strip()
| e11co/Astromind | Astrobaby-chat/cakechat/utils/text_processing/str_processor.py | str_processor.py | py | 1,777 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "nltk.tokenize.tokenize.RegexpTokenizer",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.tokenize",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "nltk.tokenize",
"line_number": 10,
"usage_type": "name"
},
{
"... |
6328911031 | # -*- coding: utf-8 -*-
from __future__ import annotations
import json
import os
import typing as tp
from PySide2.QtWidgets import QLineEdit, QSpinBox
class model(object):
    """File-renaming model: builds new file paths from Qt widget inputs.

    Methods return the candidate path; only rename() touches the filesystem.
    """

    def __init__(self, *args, **kwargs):
        super(model, self).__init__(*args, **kwargs)

    def debug(self):
        # Simple smoke-test hook.
        print("debug")

    def rename(self, base_file_path, new_file_path):
        """Rename the file on disk and return the new path."""
        os.rename(base_file_path, new_file_path)
        return new_file_path

    def replace(self, base_file_path, lineedit_old: QLineEdit, lineedit_new: QLineEdit):
        """Return a path whose basename has the old substring replaced by the new one."""
        base_dir, base_name, ext = model._separate_file_path(base_file_path)
        new = lineedit_new.text()
        old = lineedit_old.text()
        print("base_file_path", base_file_path)
        print("new", new)
        print("old", old)
        new_name = base_name.replace(old, new)
        new_file_path = os.path.join(base_dir, f"{new_name}{ext}")
        return new_file_path

    def consecutiveNumber(self, base_file_path: str,
                          line_edit: QLineEdit,
                          zero_padding: QSpinBox,
                          digits: QSpinBox):
        """Return a path with separator text plus a zero-padded counter appended."""
        base_dir, base_name, ext = model._separate_file_path(base_file_path)
        between_text = line_edit.text()
        index = zero_padding.value()
        # NOTE(review): the parameter is shadowed here on purpose -- from this
        # point on, "digits" is the pad width (int), not the spin box.
        digits = digits.value()
        digits_number = f"{index:0>{digits}}"
        new_name = base_name + between_text + digits_number
        print("between_text", between_text)
        print("digits_number", digits_number)
        print("new_name", new_name)
        new_file_path = os.path.join(base_dir, f"{new_name}{ext}")
        return new_file_path

    def fix(self, base_file_path: str, string_edit: QLineEdit, is_prefix=False):
        """Return a path with the widget text added as prefix or suffix."""
        base_dir, base_name, ext = model._separate_file_path(base_file_path)
        fix_text = string_edit.text()
        if is_prefix:
            new_name = self.prefix(fix_text, base_name)
        else:
            new_name = self.suffix(fix_text, base_name)
        new_file_path = os.path.join(base_dir, f"{new_name}{ext}")
        return new_file_path

    def prefix(self, fix_text: str, base_name: str) -> str:
        """Prepend fix_text to the base name (prefix).
        """
        return f"{fix_text}{base_name}"

    def suffix(self, fix_text: str, base_name: str) -> str:
        """Append fix_text to the base name (suffix).
        """
        return f"{base_name}{fix_text}"

    def upperCase(self, base_file_path: str):
        """Return the path with the basename converted to upper case.
        """
        base_dir, base_name, ext = model._separate_file_path(base_file_path)
        new_name = base_name.upper()
        new_file_path = os.path.join(base_dir, f"{new_name}{ext}")
        return new_file_path

    def lowerCase(self, base_file_path: str):
        """Return the path with the basename converted to lower case.
        """
        base_dir, base_name, ext = model._separate_file_path(base_file_path)
        new_name = base_name.lower()
        new_file_path = os.path.join(base_dir, f"{new_name}{ext}")
        return new_file_path

    def capitalCase(self, base_file_path: str):
        """Return the path with only the first character of the basename capitalized.
        """
        base_dir, base_name, ext = model._separate_file_path(base_file_path)
        new_name = base_name.capitalize()
        new_file_path = os.path.join(base_dir, f"{new_name}{ext}")
        return new_file_path

    def titleCase(self, base_file_path: str):
        """Return the path with the basename in title case (each word capitalized).
        """
        base_dir, base_name, ext = model._separate_file_path(base_file_path)
        new_name = base_name.title()
        new_file_path = os.path.join(base_dir, f"{new_name}{ext}")
        return new_file_path

    @classmethod
    def _separate_file_path(cls, base_file_path) -> tp.Tuple[str, str, str]:
        # Annotation fixed: the original wrote tp.Tuple(str, str, str), a call,
        # which is only harmless because of `from __future__ import annotations`.
        base_name, ext = os.path.splitext(os.path.basename(base_file_path))
        base_dir = os.path.dirname(base_file_path)
        return base_dir, base_name, ext

    def load_json(self, path) -> tp.Dict[str, tp.Any]:
        """Load a JSON config file and cache it on the instance."""
        with open(path, mode="r", encoding="utf-8") as f:
            self.__config = json.load(f)
        return self.__config
        # NOTE(review): the trailing "..." below is unreachable dead code.
        ...

    def getOperationMode(self, operationName):
        """Return the configured mode for the named operation (requires load_json first)."""
        return self.__config["operations"][operationName]["mode"]
| RyotaUnzai/pairpro | renamer/renamerModel.py | renamerModel.py | py | 4,117 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.rename",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets.QLineEdit",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"lin... |
73152367715 | from aws_cdk import Stack, Duration
from constructs import Construct
import aws_cdk.aws_lambda as _lambda
import aws_cdk.aws_iam as iam
import aws_cdk.aws_stepfunctions_tasks as tasks
import aws_cdk.aws_stepfunctions as sfn
import aws_cdk.aws_sns as sns
from aws_cdk.aws_dynamodb import Table
class PostProcessStack(Stack):
    """Make a step function state machine with lambdas doing the work."""

    def __init__(self, scope: Construct, id: str,
                 lambda_tracing,
                 ddb_table: Table,
                 gamelog_lambda: _lambda.Function,
                 **kwargs) -> None:
        # lambda_tracing is forwarded verbatim to every Function's
        # `tracing` setting; ddb_table is the shared stats table.
        super().__init__(scope, id, **kwargs)
        # Topic notified whenever any branch of the pipeline fails.
        fail_topic = sns.Topic(self, "Postprocessing Failure Topic")
        # success_topic = sns.Topic(self, "Postprocessing Success Topic")
        # One shared execution role for all workers: basic CloudWatch
        # logging plus read/write access to the DynamoDB stats table.
        ddb_lambda_role = iam.Role(self, "Lambda-ddb-role",
                                   role_name='rtcwpro-lambda-postprocessing-role',
                                   assumed_by=iam.ServicePrincipal("lambda.amazonaws.com")
                                   )
        ddb_lambda_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSLambdaBasicExecutionRole'))
        ddb_table.grant_read_write_data(ddb_lambda_role)
        # Worker: recomputes player Elo ratings after a match.
        elo_lambda = _lambda.Function(
            self, 'elo-lambda',
            function_name='rtcwpro-elo',
            code=_lambda.Code.from_asset('lambdas/postprocessing/elo'),
            handler='elo.handler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            role=ddb_lambda_role,
            tracing=lambda_tracing,
            timeout=Duration.seconds(30),
            environment={
                'RTCWPROSTATS_TABLE_NAME': ddb_table.table_name,
            }
        )
        # Worker: aggregates per-match stats summaries.
        summary_lambda = _lambda.Function(
            self, 'summary-lambda',
            function_name='rtcwpro-stats-summary',
            code=_lambda.Code.from_asset('lambdas/postprocessing/summary'),
            handler='summary.handler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            role=ddb_lambda_role,
            tracing=lambda_tracing,
            timeout=Duration.seconds(30),
            environment={
                'RTCWPROSTATS_TABLE_NAME': ddb_table.table_name,
            }
        )
        # Worker: posts match notifications to Discord (shorter timeout —
        # it is only an HTTP webhook call).
        discord_match_notify_lambda = _lambda.Function(
            self, 'discord-match-notify-lambda',
            function_name='rtcwpro-discord-match-notify',
            code=_lambda.Code.from_asset('lambdas/postprocessing/discord'),
            handler='discord-match-notify.handler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            role=ddb_lambda_role,
            tracing=lambda_tracing,
            timeout=Duration.seconds(10),
            environment={
                'RTCWPROSTATS_TABLE_NAME': ddb_table.table_name,
            }
        )
        # =============================================================================
        #         wsummary_lambda = _lambda.Function(
        #             self, 'wsummary-lambda',
        #             function_name='rtcwpro-wstats-summary',
        #             code=_lambda.Code.asset('lambdas/postprocessing/wsummary'),
        #             handler='wsummary.handler',
        #             runtime=_lambda.Runtime.PYTHON_3_8,
        #             role=ddb_lambda_role,
        #             tracing=lambda_tracing
        #             )
        # =============================================================================
        # Publishes a plain-text failure message to the failure topic.
        send_failure_notification = tasks.SnsPublish(self, "Postprocessing Failure",
                                                     topic=fail_topic,
                                                     integration_pattern=sfn.IntegrationPattern.REQUEST_RESPONSE,
                                                     message=sfn.TaskInput.from_text("Process Failure")
                                                     )
        #         success = tasks.SnsPublish(self, "Postprocessing Success",
        #                                    topic=success_topic,
        #                                    integration_pattern=sfn.IntegrationPattern.REQUEST_RESPONSE,
        #                                    message=sfn.TaskInput.from_text("Process success!")
        #                                    )
        # Tasks: round 1 only notifies Discord; round 2 fans out
        # Elo (then a Discord notify), summary and gamelog in parallel.
        Round1Processing = tasks.LambdaInvoke(self, "Discord round1 notify", lambda_function=discord_match_notify_lambda)
        Discordmatch = tasks.LambdaInvoke(self, "Discord match notify", input_path="$.matchid", lambda_function=discord_match_notify_lambda)
        ELO = tasks.LambdaInvoke(self, "Calculate Elo", input_path="$.matchid", result_path="$.Payload", lambda_function=elo_lambda).next(Discordmatch)
        Summary = tasks.LambdaInvoke(self, "Summarize stats", input_path="$.matchid", lambda_function=summary_lambda)
        Gamelog = tasks.LambdaInvoke(self, "Process gamelog", input_path="$.matchid", lambda_function=gamelog_lambda)
        Round2Processing = sfn.Parallel(self, "Do the work in parallel")
        Round2Processing.branch(ELO)
        Round2Processing.branch(Summary)
        Round2Processing.branch(Gamelog)
        Round2Processing.add_catch(send_failure_notification)
        # Round2Processing.next(success)
        # Route on $.roundid; anything unexpected hits the failure topic.
        choice = sfn.Choice(self, "Round 1 or 2")
        choice.when(sfn.Condition.number_equals("$.roundid", 1), Round1Processing)
        choice.when(sfn.Condition.number_equals("$.roundid", 2), Round2Processing)
        choice.otherwise(send_failure_notification)
        # EXPRESS state machine: short-lived executions, 5-minute cap.
        postproc_state_machine = sfn.StateMachine(self, "ProcessMatchData",
                                                  definition=choice,
                                                  timeout=Duration.minutes(5),
                                                  state_machine_type=sfn.StateMachineType.EXPRESS
                                                  )
        # Exposed so sibling stacks can wire triggers to the pipeline.
        self.postproc_state_machine = postproc_state_machine
| donkz/rtcwprostats | stacks/postprocess.py | postprocess.py | py | 5,841 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "aws_cdk.Stack",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "constructs.Construct",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "aws_cdk.aws_dynamodb.Table",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "aws_cdk... |
42938740587 | import argparse
import getpass
import json
import difflib
import sys
import requests
import colorama
from colorama import Fore
from zjusess import zjusess
from scorenotification import scorenotification
# 用于中文对齐输出
def pad_len(string, length):
    """Return the ``str.format`` field width that renders *string* in a
    column *length* terminal cells wide.

    GBK encodes full-width (CJK) characters as two bytes, so
    ``len(string.encode('GBK'))`` is the on-screen width of the string.
    """
    display_width = len(string.encode('GBK'))
    return length + len(string) - display_width
class LOG:
    """ANSI-coloured prefixes for the different log levels."""
    info = Fore.CYAN + 'Info: ' + Fore.RESET
    warning = Fore.YELLOW + 'Warning: ' + Fore.RESET
    error = Fore.RED + 'Error: ' + Fore.RESET
    done = Fore.GREEN + 'Done: ' + Fore.RESET
    tips = Fore.MAGENTA + 'Tips: ' + Fore.RESET
    default = ''  # no prefix
def print_log(log : LOG, *args, **kwargs):
    """Print *args* with normal ``print`` semantics, prefixed by *log*."""
    print(log, end='')  # prefix only: no newline, no separator after it
    print(*args, **kwargs)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ZJU Score Assistant')
    parser.add_argument('-i', '--initial', action='store_true', help='initialize your information')
    parser.add_argument('-u', '--update', action='store_true', help='update the course score')
    parser.add_argument('-ls', '--list', nargs='*', metavar=('YEAR', 'SEMESTER'), help='list the course and score in a certain year/semester')
    parser.add_argument('-n', '--name', nargs='+', help='search score by the name of the course')
    parser.add_argument('-g', '--gpa', nargs='*', metavar=('YEAR', 'SEMESTER'), help='calculator the gpa')
    parser.add_argument('-d', '--ding', nargs='?', metavar=('DingWebhook'), default=argparse.SUPPRESS, help='set your DingTalk Robot Webhook. Empty means disabled')
    parser.add_argument('-dn', '--dnotification', action='store_true', help='enable dingtalk score notification')
    args = parser.parse_args()
    colorama.init(autoreset=True)

    # Marks that carry no grade point (pass / fail / withdrawn).
    SCORELESS = ('合格', '不合格', '弃修')

    # --- helpers extracted from previously fourfold-duplicated code ---
    def print_course_table(courses):
        """Print one aligned table row per course (CJK-aware padding)."""
        print(f'{"Semeter":16s}{"Name":20s}\tMark\tGP\tCredit')
        for course in courses:
            semester = f"{course.get('xn')} {course.get('xq')}"
            print('{0:<{len0}}{1:<{len1}}\t{2}\t{3}\t{4}'.format(
                semester,
                course.get('kcmc'),
                course.get('cj'),
                course.get('jd'),
                course.get('xf'),
                len0=pad_len(semester, 16),
                len1=pad_len(course.get('kcmc'), 20)))

    def weighted_gp(grade, credit):
        """Return (total grade points, credit-weighted GPA)."""
        gp = sum(g * c for g, c in zip(grade, credit))
        totcredit = sum(credit)
        return gp, (gp / totcredit if totcredit != 0 else .0)

    def resolve_year(prefix):
        """Expand a year prefix (e.g. '2019') to the full stored 'xn' value."""
        for course in data.values():
            if course.get('xn').find(prefix) == 0:
                return course.get('xn')
        return prefix

    def graded(pred=lambda c: True):
        """(grade, credit) lists for counted courses matching *pred*."""
        sel = [c for c in data.values() if pred(c) and c.get('cj') not in SCORELESS]
        return [c.get('jd') for c in sel], [float(c.get('xf')) for c in sel]

    if args.initial:
        # Ask for ZJUAM credentials and verify them before saving.
        username = input("ZJUAM account's username: ")
        password = getpass.getpass(f"ZJUAM {username}'s password: ")
        database = {
            'username': username,
            'password': password,
        }
        session = zjusess()
        try:
            if not session.login(username, password):
                print_log(LOG.error, 'Invalid username or password. Please check them again and use -i to reset them.')
                sys.exit()
        except requests.exceptions.ConnectionError:
            print_log(LOG.error, 'Cannot connect to the Internet. Please check your Internet connection.')
        else:
            with open("database.json", 'w') as load_f:
                load_f.write(json.dumps(database))
            print_log(LOG.done, 'Initial Success!')
        session.close()

    data = {}
    if args.update:
        session = zjusess()
        try:
            with open('database.json', 'r') as f:
                userdata = json.load(f)
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.
        except (OSError, json.decoder.JSONDecodeError):
            print_log(LOG.error, 'Cannot find your user data. Please use -i to initialize.')
            sys.exit()
        username = userdata['username']
        password = userdata['password']
        try:
            res = session.login(username, password)
        except requests.exceptions.ConnectionError:
            print_log(LOG.error, 'Cannot connect to the Internet. Please check your Internet connection.')
        else:
            if not res:
                print_log(LOG.error, 'Login failed. Please check your username and password. Remember to use -i to reset them.')
            else:
                # Open the score-query site, then fetch all course records.
                res = session.get(r'http://appservice.zju.edu.cn/zdjw/cjcx/cjcxjg')
                res = session.post('http://appservice.zju.edu.cn/zju-smartcampus/zdydjw/api/kkqk_cxXscjxx')
                data = dict(enumerate(res.json()['data']['list']))
                with open('userscore.json', 'w') as f:
                    f.write(json.dumps(data))
                print_log(LOG.done, 'Updated Successfully!')
        session.close()
    else:
        try:
            with open('userscore.json', 'r') as f:
                data = json.load(f)
        except (OSError, json.decoder.JSONDecodeError):
            print_log(LOG.error, 'Cannot find your score data, please use -u to update first.')

    if args.list != None:
        if len(args.list) == 0:
            courses = list(data.values())
            missing_msg = f'Cannot find any courses during the whole college.'
        elif len(args.list) == 1:
            courses = [i for i in data.values() if i.get('xn').find(args.list[0]) == 0]
            missing_msg = f'Cannot find any courses about the academic year of {args.list[0]}.'
        else:
            if len(args.list) > 2:
                print_log(LOG.warning, f'The following argument(s) will be ignored:\n\t{" ".join(args.list[2:])}')
            courses = [i for i in data.values() if i.get('xn').find(args.list[0]) == 0 and args.list[1].find(i.get('xq', '-1')) != -1]
            missing_msg = f'Cannot find any courses about the semester of {" ".join(args.list[:2])}'
        if len(courses) == 0:
            print_log(LOG.info, missing_msg)
            print_log(LOG.tips, 'Maybe you need to use -u to update first :)')
        else:
            print_course_table(courses)

    if args.name:
        coursename = [i.get('kcmc') for i in data.values()]
        res = []
        for searchcourse in args.name:
            res += difflib.get_close_matches(searchcourse, coursename, cutoff=0.3)
        res = list(dict().fromkeys(res).keys())  # de-duplicate, keep order
        if len(res) == 0:
            print_log(LOG.info, f'Cannot find any course matching keyword(s) {" ".join(args.name)}')
        else:
            matches = [c for name in res for c in data.values() if c.get('kcmc') == name]
            print_course_table(matches)

    if args.gpa != None:
        if len(args.gpa) == 0:
            grade, credit = graded()
            if len(grade) == 0:
                print_log(LOG.info, f'Cannot find any courses during the whole college.')
                print_log(LOG.tips, 'Maybe you need to use -u to update first :)')
            else:
                gp, gpa = weighted_gp(grade, credit)
                print_log(LOG.done, 'Your GPA during the whole college is %.2f and GP is %.2f' % (gpa, gp))
        elif len(args.gpa) == 1:
            grade, credit = graded(lambda c: c.get('xn').find(args.gpa[0]) == 0)
            if len(grade) == 0:
                print_log(LOG.info, f'Cannot find any courses about the academic year of {args.gpa[0]}')
                print_log(LOG.tips, 'Maybe you need to use -u to update first :)')
            else:
                gp, gpa = weighted_gp(grade, credit)
                year = resolve_year(args.gpa[0])
                print_log(LOG.done, 'Your GPA during the academic year of %s is %.2f and GP is %.2f' % (year, gpa, gp))
        else:
            if len(args.gpa) > 2:
                print_log(LOG.warning, f'The following argument(s) will be ignored:\n\t{" ".join(args.gpa[2:])}')
            grade, credit = graded(lambda c: c.get('xn').find(args.gpa[0]) == 0 and args.gpa[1].find(c.get('xq', '-1')) != -1)
            if len(grade) == 0:
                print_log(LOG.info, f'Cannot find any courses about the semester of {" ".join(args.gpa[:2])}')
                print_log(LOG.tips, 'Maybe you need to use -u to update first :)')
            else:
                gp, gpa = weighted_gp(grade, credit)
                year = resolve_year(args.gpa[0])
                semester = args.gpa[1]
                print_log(LOG.done, 'Your GPA during the semester of %s %s is %.2f and GP is %.2f' % (year, semester, gpa, gp))

    try:
        # args.ding is absent (SUPPRESS) unless -d was given; a bare -d
        # yields None, which resets the webhook to the disabled default.
        if args.ding:
            url = args.ding
        else:
            url = 'https://oapi.dingtalk.com/robot/send?access_token='
        try:
            with open('database.json', 'r') as f:
                userdata = json.load(f)
        except json.decoder.JSONDecodeError:
            userdata = {}
        userdata['url'] = url
        with open("database.json", 'w') as load_f:
            load_f.write(json.dumps(userdata))
    except AttributeError:
        pass
if args.dnotification:
scorenotification() | PeiPei233/ZJUScoreAssistant | zjuscore.py | zjuscore.py | py | 12,488 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "colorama.Fore.CYAN",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RESET",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "colora... |
30562257637 | from collections import OrderedDict
import random
from string import ascii_uppercase
def rand_chars(length):
    """Return *length* uniformly random uppercase ASCII letters.

    BUGFIX: the generator used the Python-2-only ``xrange``; ``range``
    behaves identically here and works on both Python 2 and 3.
    """
    return ''.join(
        random.choice(ascii_uppercase)
        for _ in range(length)
    )
def is_int_in_range(val, low, high):
    """True when *val* is ``None`` or an int within ``[low, high]``.

    Bounds are inclusive; a missing value (``None``) always passes.
    """
    if val is None:
        return True
    return isinstance(val, int) and low <= val <= high
def near_merge(a, b):
    """Randomly interleave *a* and *b*, preserving each argument's order.

    Both sequences are cut near their midpoints (the cut index is drawn
    from the middle 40-60% band) and the halves are merged recursively,
    so the elements end up roughly evenly mixed.
    """
    if not a:
        return b
    if not b:
        return a
    size_a, size_b = len(a) + 1, len(b) + 1
    cut_a = random.randint(int(size_a * .4), int(size_a * .6))
    cut_b = random.randint(int(size_b * .4), int(size_b * .6))
    left = near_merge(a[:cut_a], b[:cut_b])
    right = near_merge(a[cut_a:], b[cut_b:])
    return left + right
def make_bag(letters):
    """Shuffle *letters* into a bag string with vowels and consonants
    interleaved (via ``near_merge``) rather than clumped together."""
    vowels, consonants = [], []
    for ch in letters:
        (vowels if ch in 'AEIOU' else consonants).append(ch)
    random.shuffle(vowels)
    random.shuffle(consonants)
    return ''.join(near_merge(vowels, consonants))
def lru(size=100):
    """Decorator factory: memoize the wrapped function with an LRU cache
    holding at most *size* entries.

    The cache key is the positional args plus the sorted keyword items,
    so every argument must be hashable.

    BUGFIX: the key was built with the Python-2-only ``dict.iteritems``;
    ``dict.items`` is equivalent and works on both Python 2 and 3.
    """
    def wrapper(func):
        func.__lru = OrderedDict()
        def wrapped(*args, **kwargs):
            key = tuple(args) + tuple(sorted(kwargs.items()))
            if key in func.__lru:
                # Hit: re-insert below so it becomes most recently used.
                value = func.__lru[key]
                del func.__lru[key]
            else:
                value = func(*args, **kwargs)
            func.__lru[key] = value
            while len(func.__lru) > size:
                func.__lru.popitem(last=False)  # evict least recently used
            return value
        return wrapped
    return wrapper
| justecorruptio/snatch_v3 | src/utils.py | utils.py | py | 1,513 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.choice",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "string.ascii_uppercase",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "random.randint",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "random.randint"... |
32521435646 | from flask import Flask, request, render_template
from keras.models import load_model
from keras.preprocessing import image
import numpy as np
from io import BytesIO
app = Flask(__name__)
# Pre-trained Keras model, loaded once at import time and reused by requests.
model = load_model('brain_tumor_model.h5')
@app.route('/', methods=['GET', 'POST'])
def upload_file():
    """GET: render the upload form; POST: classify the uploaded image.

    The raw upload is wrapped in a BytesIO so Keras can decode it, resized
    to the 64x64 input the model was trained on, and given a leading batch
    dimension before prediction.
    """
    if request.method != 'POST':
        return render_template('index.html')
    upload = BytesIO(request.files['file'].read())
    img = image.img_to_array(image.load_img(upload, target_size=(64, 64)))
    batch = np.expand_dims(img, axis=0)
    prediction = model.predict(batch)
    return render_template('result.html', prediction=str(prediction[0][0]))
if __name__ == '__main__':
    # Development server only; use a WSGI server in production.
    app.run(debug=True)
| alfalfs/Cancer_Detection_using_CCN | app.py | app.py | py | 1,086 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.req... |
17137250502 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Chrome()

# One output line per input link so the two files stay row-aligned.
with open('boardgamelinks.txt', 'r') as links_file, open('boardgametime.txt', 'w') as out_file:
    for link in links_file:
        driver.get(link)
        try:
            # Wait for the playing-time widget to render.
            WebDriverWait(driver, 4).until(
                EC.presence_of_element_located((By.CSS_SELECTOR, "span[ng-if='min > 0']"))
            )
            # Renamed from `min`/`max`, which shadowed the builtins.
            min_elem = driver.find_elements(By.CSS_SELECTOR, "span[ng-if='min > 0']")[-1]
            min_time = int(min_elem.text.strip())
            max_elems = driver.find_elements(By.CSS_SELECTOR, "span[ng-if='max>0 && min != max']")
            if len(max_elems) > 1:
                # A range like "30 – 90": keep the upper bound.
                max_time = int(max_elems[-1].text.strip().split("–")[1].strip())
            else:
                max_time = min_time
            # Estimate: 75% of the way from the minimum to the maximum time.
            estimated = (max_time - min_time) * 0.75 + min_time
            out_file.write(str(estimated) + "\n")
        except Exception:
            # On any failure (timeout, parse error) emit an empty line so
            # later rows still line up with their links.
            out_file.write("\n")

driver.quit()  # release the browser process (was previously leaked)
| YKawesome/Python-Projects | Projects/Board Game Scraper/Times/boardgametimes.py | boardgametimes.py | py | 1,203 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 12,
"usage_type": "call"
},
{... |
24655058065 | from flask import Blueprint
from . import views
# Public-facing blueprint mounted at the site root.
bp = Blueprint(
    name="public",
    import_name=__name__,
    static_folder=None,  # no blueprint-local static files
    template_folder=None,  # templates resolved at the application level
    url_prefix="/",
)

# Route "/" -> views.index, exposed under endpoint "public.index".
bp.add_url_rule(
    rule="/",
    endpoint="index",
    view_func=views.index,
    methods=["GET"],
)
| harrelchris/template-flask | app/public/urls.py | urls.py | py | 287 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 5,
"usage_type": "call"
}
] |
11637400068 | import re
import math
import pandas as pd
import datetime
import itertools as it
import plotly.express as px
import plotly.graph_objects as go
from textwrap import wrap
from collections import ChainMap, OrderedDict
from .downloader import download
from more_itertools import partition
pd.set_option("mode.chained_assignment", None)
def formato_padrao(desp):
    """Normalise a raw despesas frame into long ('tidy') format.

    Column handling:
      * 'Ano e mês do lançamento' -> 'ano_mes'
      * 'Nome X'                  -> 'X'
      * 'Valor ... (R$)'          -> the payment-stage name (e.g. 'Pago'),
        and every stage column is then melted into 'modalidade'/'valor'.
    An empty frame is returned untouched.
    """
    if desp.empty:
        return desp
    columns = list(desp.columns)
    renames = {'Ano e mês do lançamento': 'ano_mes'}
    renames.update({
        c: re.sub(r'^Nome ', '', c) for c in columns if c.startswith('Nome ')
    })
    stage_names = {
        c: c.replace('Valor', '').replace('a Pagar ', '').replace('(R$)', '').strip()
        for c in columns if c.startswith('Valor ')
    }
    renames.update(stage_names)
    renamed = desp[columns].rename(columns=renames)
    value_cols = set(stage_names.values())
    return renamed.melt(
        id_vars=[c for c in renamed.columns if c not in value_cols],
        value_name='valor',
        var_name='modalidade',
    )
def despesas(transform=lambda x: x, *args, **kwargs):
    """Download the monthly despesas CSVs (all months, 2014-2020).

    Each month's CSV is normalised with ``formato_padrao`` and then run
    through *transform*; ``*args``/``**kwargs`` are forwarded to
    ``download``.  Returns an iterator over the non-empty frames.
    """
    def url(mes, ano=2020):
        return f'https://raw.githubusercontent.com/brasil-em-numeros/dados-publicos/master/portaltransparencia/despesas-execucao/graficos/{ano:04d}{mes:02d}.csv'

    def trans(x, *targs, **tkwargs):
        # NOTE: forwards whatever extra arguments `download` hands to the
        # per-file transform (the inner parameters intentionally shadow
        # nothing from the enclosing scope).
        return transform(formato_padrao(pd.read_csv(x)), *targs, **tkwargs)

    todas_urls = [
        url(mes, ano) for mes, ano in it.product(range(1, 13), range(2014, 2021))
    ]
    frames = download(todas_urls, trans, *args, **kwargs)
    return filter(lambda df: not df.empty, frames)
# English month abbreviations/names (as produced by strftime in the C
# locale) mapped to their Portuguese equivalents.  Insertion order is
# calendar order — the heatmap uses the key order to sort its columns.
date_dict = OrderedDict(
    jan = "jan",
    feb = "fev",
    mar = "mar",
    apr = "abr",
    may = "mai",
    jun = "jun",
    jul = "jul",
    aug = "ago",
    sep = "set",
    oct = "out",
    nov = "nov",
    dec = "dez",
    January = "Janeiro",
    February = "Fevereiro",
    March = "Março",
    April = "Abril",
    May = "Maio",
    June = "Junho",
    July = 'Julho',
    August = "Agosto",
    September = "Setembro",
    October = "Outubro",
    November = "Novembro",
    December = "Dezembro"
)
# ----------------------------
# Gráficos sobre as despesas
# ----------------------------
# 1 - heatmap por mês e ano
def heatmap_data(desp):
    """Aggregate paid, non-zero expenses by (year, month) for the heatmap.

    Expects the long-format frame from ``formato_padrao`` (columns
    'ano_mes', 'modalidade', 'valor').  Returns one row per (ano, mes)
    with the summed 'valor'.

    NOTE(review): the final ``.sum()`` relies on the legacy
    numeric_only behaviour of GroupBy.sum (non-numeric columns silently
    dropped); verify against the pinned pandas version.
    """
    if desp.empty:
        return desp
    return desp.query(
        "modalidade == 'Pago'"
    ).query(
        "valor != 0"
    ).assign(
        # 'ano_mes' is 'YYYY/MM'; append a day so it parses to a date.
        data = lambda df: pd.to_datetime(
            df['ano_mes'] + "/01", format = "%Y/%m/%d"
        )
    ).assign(
        # %b gives the English abbreviation in the default C locale.
        mes = lambda df: df['data'].apply(lambda d: d.strftime("%b").lower()),
        ano = lambda df: df['data'].dt.year
    ).groupby(
        ['ano', 'mes'],
        as_index = False
    ).sum()
def heatmap_chart(data_heat):
    """Render the year x month spend heatmap from ``heatmap_data`` output.

    NOTE(review): ``pivot`` is called with positional arguments, which
    newer pandas versions reject (keyword-only) — confirm against the
    pinned pandas version.
    """
    data_heat = data_heat.pivot(
        'ano', 'mes', 'valor'
    ).sort_index().fillna(0)
    def key(x):
        # Sort key = position of the month abbreviation in date_dict
        # (whose insertion order is calendar order).
        for i, k in enumerate(date_dict.keys()):
            if x == k:
                break
        return i
    data_heat = data_heat[sorted(data_heat.columns, key = key)]
    fig = px.imshow(
        data_heat.to_numpy(),
        # Column labels translated to Portuguese via date_dict.
        x = [date_dict.get(c, c).title() for c in data_heat.columns],
        y = [str(i) for i in data_heat.index],
        labels = dict(color = "Gastos pagos"),
        color_continuous_scale = "RdYlGn_r"
    )
    return fig
def funcao_data(desp):
    """Sum paid expenses per (month, Função) and attach a parsed date.

    NOTE(review): ``.sum()`` relies on legacy numeric_only GroupBy
    behaviour — verify against the pinned pandas version.
    """
    if desp.empty:
        return desp
    df = desp.query(
        "modalidade == 'Pago'"
    ).groupby(
        ['ano_mes', "Função"], as_index = False
    ).sum()
    # 'ano_mes' is 'YYYY/MM'; append a day so it parses to a timestamp.
    df['data'] = df['ano_mes'] + "/01"
    df['data'] = pd.to_datetime(df['data'], format = '%Y/%m/%d')
    return df
def funcao_chart(data_funcao):
    """Line chart of monthly paid spend, one trace per 'Função' category."""
    fig = go.Figure()
    # Hover shows the category name and the spend in currency format.
    hover_template = "<b>%{text}</b><br>Gasto: %{y:$,.0f}"
    funs = sorted(data_funcao['Função'].unique())
    for fun in funs:
        # One chronologically sorted trace per category.
        plot_data = data_funcao.loc[
            data_funcao['Função'] == fun
        ].sort_values(['data'])
        x = plot_data['data']
        y = plot_data['valor']
        fig.add_trace(
            go.Scatter(
                x = x,
                y = y,
                mode = "lines+markers",
                name = fun,
                text = plot_data['Função'],
                line = {'shape' : 'spline'},
                hovertemplate = hover_template
            )
        )
    fig.update_layout(yaxis_title = "Valor Pago")
    fig.update_layout(title = "Gastos por categoria")
    return fig
def ministerios_data(desp):
    """Monthly spend per ministry, keeping the top 10 and folding the rest
    into an 'Outros' bucket.

    NOTE(review): both ``.sum()`` calls rely on legacy numeric_only
    GroupBy behaviour — verify against the pinned pandas version.
    """
    if desp.empty:
        return desp
    df = desp.groupby(
        ['ano_mes', 'Órgão Superior'], as_index = False
    ).sum()
    # ----------------------
    # Top 10 ministries
    # ----------------------
    # Rank value columns within each month (1 = biggest spender); the
    # grouped frame shares df's index so the rank aligns row-for-row.
    top10 = df.drop(
        columns = ['Órgão Superior'],
        errors = 'ignore'
    ).groupby(
        ['ano_mes'], as_index = False
    ).rank(method = "min", ascending = False)
    top10.columns = ['rank']
    df = pd.merge(
        df, top10,
        how = "left",
        left_index = True,
        right_index = True
    ).assign(
        # Everything below rank 10 collapses into the 'Outros' bucket.
        ministerio = lambda x: [
            "Outros" if r > 10 else m for m, r in zip(
                x['Órgão Superior'],
                x['rank']
            )
        ]
    ).groupby(
        ['ano_mes', 'ministerio'],
        as_index = False
    ).sum().drop(
        columns = ['rank']
    ).assign(
        # Human-readable month label, e.g. 'Jan/2020', for the slider.
        data = lambda x: x['ano_mes'].apply(
            lambda d: datetime.datetime.strptime(d + "/01", "%Y/%m/%d").strftime("%b/%Y")
        )
    )
    return df
def ministerios_chart(ministerios_data):
    """Slider chart: top-10 ministry spend per month, one hidden trace per
    month with a slider step that reveals it."""
    df = ministerios_data.sort_values(['ano_mes'])
    # --------------------------
    # Compute the x-axis extent
    # --------------------------
    # Round the maximum spend up to the next 0.5 of its leading digit so
    # every frame shares the same fixed axis.
    x_max = max(df['valor'])
    zeros = int(math.log10(x_max))
    x_final = x_max / pow(10, zeros)
    if (x_final % 1) < 0.5:
        x_final = int(x_final) + 0.5
    else:
        x_final = int(x_final) + 1.0
    eixo_x = [0, x_final * pow(10, zeros)]
    # ----------------------
    # Build one trace per month
    # ----------------------
    # Grouping by ano_mes (alongside the label) keeps the months in
    # chronological order for plotly.
    fig = go.Figure()
    for (am, dt), grp in df.groupby(['ano_mes', 'data']):
        # Sort ministries alphabetically but force 'Outros' to the end.
        ministerios, outro = partition(
            lambda x: x == 'Outros', grp['ministerio']
        )
        ministerios = sorted(ministerios)
        grp.loc[:, 'ministerio'] = pd.Categorical(
            grp['ministerio'], it.chain(ministerios, outro)
        )
        grp = grp.sort_values(['ministerio'])
        fig.add_trace(
            go.Scatter(
                visible=False,
                name = dt,
                x = grp['valor'],
                # Wrap long ministry names so y labels stay readable.
                y = grp['ministerio'].apply(
                    lambda s: "<br>".join(wrap(s, width = 30))
                ),
                mode = "markers"
            )
        )
    fig.data[0].visible = True
    # Create and add slider
    steps = []
    for i in range(len(fig.data)):
        step = dict(
            method = "update",
            args = [
                {"visible": [False] * len(fig.data)}
            ], # layout attribute
            label = fig.data[i].name
        )
        step["args"][0]["visible"][i] = True # Toggle i'th trace to "visible"
        steps.append(step)
    sliders = [
        dict(
            active = 0,
            currentvalue = {"prefix": "Data: "},
            pad = {"t": 50},
            steps = steps
        )
    ]
    fig.update_layout(
        sliders = sliders,
        title = "Top 10 ministérios com maior gasto no mês",
        # Fixed axis so frames are comparable across slider steps.
        xaxis = dict(range = eixo_x, autorange = False),
        yaxis = dict(autorange = "reversed")
    )
    fig.update_xaxes( # the y-axis is in dollars
        showgrid=False
    )
    return fig
| brasil-em-numeros/brasil-em-numeros | dashboard/provedores/pdt/despesas_publicas.py | despesas_publicas.py | py | 8,234 | python | pt | code | 1 | github-code | 1 | [
{
"api_name": "pandas.set_option",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "collections.ChainMap",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "itertools.product",
... |
10988878904 | import unittest
import torch
from torch import nn
from towhee.models.lightning_dot.bi_encoder import BiEncoder
class MockUniterEncoder(nn.Module):
    """
    A Mock UniterEncoder: logs its inputs via print and returns dummy
    (2, 1) ones-tensors instead of real image embeddings.
    """
    @classmethod
    def init_encoder(cls, config, checkpoint_path=None, project_dim=8):
        # Log the call instead of loading a real checkpoint.
        print(
            f"UniterEncoder init_encoder, config={config}, checkpoint_path={checkpoint_path}, project_dim={project_dim}")
        return MockUniterEncoder()

    def forward(self, input_ids, attention_mask, position_ids, img_feat, img_pos_feat, img_masks, gather_index):
        # NOTE(review): the two f-string halves concatenate with no
        # separator, so the log reads "...position_ids=Ximg_feat=Y".
        print(f"forward, input_ids={input_ids}, attention_mask={attention_mask}, position_ids={position_ids}"
              f"img_feat={img_feat}, img_pos_feat={img_pos_feat}, img_masks={img_masks}, gather_index={gather_index}")
        return torch.ones(2, 1), torch.ones(2, 1), torch.ones(2, 1)
class MockBertEncoder(nn.Module):
    """
    A Mock BertEncoder: logs its inputs via print and returns dummy
    (2, 1) ones-tensors instead of real text embeddings.
    """
    @classmethod
    def init_encoder(cls, config, checkpoint_path=None, project_dim=8):
        # Log the call instead of loading a real checkpoint.
        print(f"Encoder init_encoder, config={config}, checkpoint_path={checkpoint_path}, project_dim={project_dim}")
        return MockBertEncoder()

    def forward(self, input_ids, attention_mask, position_ids, img_feat, img_pos_feat, img_masks, gather_index):
        # NOTE(review): the two f-string halves concatenate with no
        # separator, so the log reads "...position_ids=Ximg_feat=Y".
        print(f"forward, input_ids={input_ids}, attention_mask={attention_mask}, position_ids={position_ids}"
              f"img_feat={img_feat}, img_pos_feat={img_pos_feat}, img_masks={img_masks}, gather_index={gather_index}")
        return torch.ones(2, 1), torch.ones(2, 1), torch.ones(2, 1)
class MockArgs():
    """Minimal stand-in for the CLI argument namespace BiEncoder expects."""

    def __init__(self, img_model_type="uniter-base", txt_model_type="bert-base"):
        self.img_model_type = img_model_type
        self.txt_model_type = txt_model_type
        self.img_model_config = "img_model_config"
        self.txt_model_config = "txt_model_config"
        # Dummy checkpoint paths; the mock encoders never read them.
        self.img_checkpoint = self.txt_checkpoint = "./"
class TestLightningDOT(unittest.TestCase):
    """
    Test LightningDOT model
    """
    # Class-level fixture: a BiEncoder wired with the two mock encoders,
    # built once when the class is created.
    args = MockArgs()
    model = BiEncoder(MockUniterEncoder(), MockBertEncoder(), args)

    def test_bi_encoder(self):
        # Each modality uses the same dummy feature dict; the values are
        # placeholders — the mock encoders only print and ignore them.
        batch = {
            "imgs":
                {
                    "input_ids": 1,
                    "attention_mask": 1,
                    "position_ids": 1,
                    "img_feat": 1,
                    "img_pos_feat": 1,
                    "img_masks": 1,
                    "gather_index": 1,
                },
            "txts":
                {
                    "input_ids": 1,
                    "attention_mask": 1,
                    "position_ids": 1,
                    "img_feat": 1,
                    "img_pos_feat": 1,
                    "img_masks": 1,
                    "gather_index": 1,
                },
            "caps":
                {
                    "input_ids": 1,
                    "attention_mask": 1,
                    "position_ids": 1,
                    "img_feat": 1,
                    "img_pos_feat": 1,
                    "img_masks": 1,
                    "gather_index": 1,
                }
        }
        out = self.model(batch)
        # All three outputs must stay on the autograd graph.
        self.assertTrue(out[0].requires_grad is True)
        self.assertTrue(out[1].requires_grad is True)
        self.assertTrue(out[2].requires_grad is True)
if __name__ == "__main__":
unittest.main()
| towhee-io/towhee | tests/unittests/models/lightning_dot/test_lightning_dot.py | test_lightning_dot.py | py | 3,401 | python | en | code | 2,843 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.ones",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_nu... |
9569632216 | import requests
from datetime import datetime
import config
import time
MY_LAT = 46.482525 # Your latitude
MY_LONG = 30.723309 # Your longitude
# Function to check if ISS is overhead
def is_iss_overhead():
    """Return True when the ISS is within ±5 degrees of our position."""
    response = requests.get(url="http://api.open-notify.org/iss-now.json")
    response.raise_for_status()
    position = response.json()["iss_position"]
    iss_latitude = float(position["latitude"])
    iss_longitude = float(position["longitude"])
    return abs(iss_latitude - MY_LAT) <= 5 and abs(iss_longitude - MY_LONG) <= 5
# Query string for the sunrise-sunset API; formatted=0 asks for
# ISO 8601 timestamps (which the API returns in UTC).
parameters = {
    "lat": MY_LAT,
    "lng": MY_LONG,
    "formatted": 0,
}
# Function to check if it's night time
def is_night():
response = requests.get("https://api.sunrise-sunset.org/json", params=parameters)
response.raise_for_status()
data = response.json()
sunrise = int(data["results"]["sunrise"].split("T")[1].split(":")[0])
sunset = int(data["results"]["sunset"].split("T")[1].split(":")[0])
time_now = datetime.now()
hour_now = time_now.hour
return hour_now <= sunrise or hour_now >= sunset
# Main program loop: poll once a minute and alert only when both
# conditions (overhead AND dark) hold.
while True:
    time.sleep(60)  # Wait for 60 seconds
    if is_iss_overhead() and is_night():
        config.send_to_email()  # Send email alert if ISS is overhead and it's night time
| sined277/30_API_REQUESTS_iss_over_head | main.py | main.py | py | 1,310 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
... |
75168963873 | """This module includes api endpoints for auth."""
from fastapi import APIRouter, status, Depends
from dependencies import verify_api_key
from repositories.user import UserRepository
from schemas.base import ResponseSchema
from schemas.auth import (
LoginRequestSchema,
TokenRequestSchema,
DecodeResponseSchema,
TokensResponseSchema,
)
from utils.response import make_response
from utils.token import generate_token, decode_token
from exceptions import APIError, TokenError
from settings import JWT_SECRET_KEY, ACCESS_JWT_EXP_DAYS, REFRESH_JWT_EXP_DAYS
from constants import (
ID_KEY,
REFRESH_ACCESS_TOKEN_EXP_KEY,
REFRESH_ACCESS_TOKEN_KEY,
ACCESS_TOKEN_KEY,
ACCESS_TOKEN_EXP_KEY,
MONOBANK_TOKEN_KEY,
)
router = APIRouter()
@router.post(
"/login",
response_model=TokensResponseSchema,
status_code=status.HTTP_200_OK,
responses={
404: {"model": ResponseSchema, "description": "User not found"},
422: {"model": ResponseSchema, "description": "Validation error"},
500: {"model": ResponseSchema, "description": "Internal Error"},
},
)
async def login(login_input: LoginRequestSchema):
"""Login a user if the supplied credentials are correct."""
try:
user = await UserRepository.get_user(MONOBANK_TOKEN_KEY, login_input.monobank_token)
except APIError as exc:
return make_response(
success=False,
http_status=exc.status_code,
subcode=exc.subcode,
message=exc.message,
data=exc.data,
)
token, token_exp = generate_token(
secret_key=JWT_SECRET_KEY,
private_claims={ID_KEY: str(user.id)},
exp_days=ACCESS_JWT_EXP_DAYS,
)
refresh_token, refresh_token_exp = generate_token(
secret_key=JWT_SECRET_KEY,
private_claims={ID_KEY: str(user.id)},
exp_days=REFRESH_JWT_EXP_DAYS,
)
return TokensResponseSchema(
success=True,
message="User was successfully authorized",
data={
ACCESS_TOKEN_KEY: token,
ACCESS_TOKEN_EXP_KEY: token_exp,
REFRESH_ACCESS_TOKEN_KEY: refresh_token,
REFRESH_ACCESS_TOKEN_EXP_KEY: refresh_token_exp,
},
)
@router.post(
"/refresh",
response_model=TokensResponseSchema,
status_code=status.HTTP_200_OK,
responses={
401: {"model": ResponseSchema, "description": "Unauthorized"},
422: {"model": ResponseSchema, "description": "Validation error"},
500: {"model": ResponseSchema, "description": "Internal Error"},
},
)
def refresh(refresh_input: TokenRequestSchema):
"""Return refreshed access token for a user."""
try:
payload = decode_token(refresh_input.token, JWT_SECRET_KEY)
except TokenError as exc:
return make_response(
success=False,
http_status=status.HTTP_401_UNAUTHORIZED,
subcode=exc.subcode,
message=exc.message,
)
user_id = payload[ID_KEY]
token, token_exp = generate_token(
secret_key=JWT_SECRET_KEY,
private_claims={ID_KEY: str(user_id)},
exp_days=ACCESS_JWT_EXP_DAYS,
)
refresh_token, refresh_token_exp = generate_token(
secret_key=JWT_SECRET_KEY,
private_claims={ID_KEY: str(user_id)},
exp_days=REFRESH_JWT_EXP_DAYS,
)
return TokensResponseSchema(
success=True,
message="Token was successfully refreshed",
data={
ACCESS_TOKEN_KEY: token,
ACCESS_TOKEN_EXP_KEY: token_exp,
REFRESH_ACCESS_TOKEN_KEY: refresh_token,
REFRESH_ACCESS_TOKEN_EXP_KEY: refresh_token_exp,
},
)
@router.post(
"/decode",
response_model=ResponseSchema,
status_code=status.HTTP_200_OK,
dependencies=[Depends(verify_api_key)],
responses={
401: {"model": ResponseSchema, "description": "Unauthorized"},
422: {"model": ResponseSchema, "description": "Validation error"},
500: {"model": ResponseSchema, "description": "Internal Error"},
},
)
def decode(
decode_input: TokenRequestSchema,
):
"""Try to decode provided token."""
try:
payload = decode_token(decode_input.token, JWT_SECRET_KEY)
except TokenError as exc:
return make_response(
success=False,
http_status=status.HTTP_401_UNAUTHORIZED,
subcode=exc.subcode,
message=exc.message,
)
user_id = payload[ID_KEY]
return DecodeResponseSchema(
success=True,
message="Token was successfully decoded",
data={ID_KEY: user_id},
)
| Monoboard/monoboard.api.auth | src/api/auth.py | auth.py | py | 4,661 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "schemas.auth.LoginRequestSchema",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "repositories.user.UserRepository.get_user",
"line_number": 44,
"usage_type": "call"
}... |
71103767073 | # coding: utf-8
import pygame as pg
import numpy as np
class GameOfLife():
def __init__(self, width=640, height=480, cell_size=20, FPS=30):
self.width = width
self.height = height
self.cell_size = cell_size
self.screen = pg.display.set_mode((width, height))
self.Ncol = self.width // self.cell_size
self.Nrow = self.height // self.cell_size
self.FPS = FPS
self.grid = np.zeros((self.Nrow, self.Ncol), dtype="int")
self.next_grid = np.zeros((self.Nrow, self.Ncol), dtype="int")
def draw_lines(self):
for x in range(self.cell_size, self.width, self.cell_size):
pg.draw.line(self.screen, pg.Color("black"), (x, self.cell_size), (x, self.height-self.cell_size))
pg.draw.line(self.screen, pg.Color("black"), (0, 0), (self.width, 0))
for y in range(self.cell_size, self.height, self.cell_size):
pg.draw.line(self.screen, pg.Color("black"), (self.cell_size, y), (self.width-self.cell_size, y))
def draw_cell(self, nrow, ncol, status):
color = "white"
if status == 1: color = "orange"
x = self.cell_size * ncol
y = self.cell_size * nrow
pg.draw.rect(self.screen, pg.Color(color), (x, y, self.cell_size, self.cell_size))
def next_status(self):
for i in range(1, self.Nrow-1):
for j in range(1, self.Ncol-1):
if self.grid[i][j] == 0:
if np.sum(self.grid[i-1:i+2, j-1:j+2]) == 3:
self.next_grid[i][j] = 1
self.draw_cell(i, j, 1)
else:
if np.sum(self.grid[i-1:i+2, j-1:j+2])-1 == 2 or np.sum(self.grid[i-1:i+2, j-1:j+2])-1 == 3:
self.next_grid[i][j] = 1
else:
self.draw_cell(i, j, 0)
def run(self):
pg.init()
pg.display.set_caption('Game of Life')
self.screen.fill(pg.Color('white'))
fnt = pg.font.Font(None, int(1.5*self.cell_size))
clock = pg.time.Clock()
run = False
running = True
while running:
clock.tick(self.FPS)
for event in pg.event.get():
if event.type == pg.QUIT: running = False
if event.type == pg.MOUSEBUTTONDOWN and \
event.pos[0]>self.cell_size and event.pos[0]<self.width-self.cell_size and \
event.pos[1]>self.cell_size and event.pos[1]<self.height-self.cell_size:
ncol = event.pos[0] // self.cell_size
nrow = event.pos[1] // self.cell_size
if event.button == 1:
self.grid[nrow][ncol] = 1
self.draw_cell(nrow, ncol, 1)
if event.button == 3:
self.grid[nrow][ncol] = 0
self.draw_cell(nrow, ncol, 0)
if event.type == pg.KEYUP and (event.key == pg.K_KP_ENTER or event.key == pg.K_RETURN): run = (run == False)
if run:
text = fnt.render("Stopped", False, pg.Color("white"))
self.screen.blit(text, (0, 0))
text = fnt.render("Running...", False, pg.Color("black"))
self.screen.blit(text, (0, 0))
# print(self.grid)
self.next_status()
# print(self.next_grid)
self.grid = self.next_grid.copy()
self.next_grid = np.zeros((self.Nrow, self.Ncol), dtype="int")
else:
text = fnt.render("Running...", False, pg.Color("white"))
self.screen.blit(text, (0, 0))
text = fnt.render("Stopped", False, pg.Color("black"))
self.screen.blit(text, (0, 0))
self.draw_lines()
pg.display.flip()
pg.quit()
# game = GameOfLife(500, 500, 100, 2)
game = GameOfLife()
game.run()
| astro-kaba4ek/Python_5 | DZ4/game_of_life.py | game_of_life.py | py | 3,257 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.display.set_mode",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",... |
21340483022 | from collections import defaultdict
from typing import List
class Node:
def __init__(self, val):
self.val = val
self.dfn = -1
self.low_dfn = -1
self.children = []
class ArticulationFinder:
def __init__(self):
self.nodes = defaultdict(list)
self.ap = set()
self.dfn = 1
self.visited = set()
self.root = None
def find_articulation_point(self,connections: List[List[int]]):
for connection in connections:
self.create_connection(connection)
self.root = 1
self.nodes[1].low_dfn = 1
self.dfs(1)
return list(self.ap)
def dfs(self, parent):
node = self.nodes[parent]
node.dfn = self.dfn
node.low_dfn = self.dfn
self.dfn += 1
self.visited.add(node.val)
for child in node.children:
if child.val in self.visited:
node.low_dfn = child.dfn if node.low_dfn > child.dfn else node.low_dfn
else:
low_dfn = self.dfs(child.val)
node.low_dfn = low_dfn if node.low_dfn > child.low_dfn else node.low_dfn
if child.low_dfn >= node.dfn and node.val != self.root:
self.ap.add(node.val)
return node.low_dfn
def create_connection(self, connection:List[int]):
src = None
if connection[0] in self.nodes:
src = self.nodes[connection[0]]
else:
src = Node(connection[0])
self.nodes[connection[0]] = src
dest = None
if connection[1] in self.nodes:
dest = self.nodes[connection[1]]
else:
dest = Node(connection[1])
self.nodes[connection[1]] = dest
src.children.append(dest)
dest.children.append(src) | kannanParamasivam/datastructures_and_algorithm | graph/problems/find_articulation_points.py | find_articulation_points.py | py | 1,991 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 54,
"usage_type": "name"
}
] |
32903456096 | #!/usr/bin/python
# encoding: utf-8
#主要是对爬取39健康网的相关方法的封装相关操作的封装
import requests
from pyquery import PyQuery as pq
class health39util(object):
def __init__(self):
pass
def parse_disease(self, url):
'''
解析疾病页面
'''
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36 Maxthon/5.2.1.6000'}
html = requests.get(url,headers=headers)
html.encoding = 'gbk'
d = pq(html.text)
#疾病名称
name = d('div.spreadhead > div.tit.clearfix > a > h1').eq(0).text()
intro = d('dl.intro > dd').eq(0).text()
if intro.endswith('详细>>'):
intro = intro[:-4]
dds = d('div.info > ul > li')
dict1 = dict()
dict1['url'] = url
dict1['疾病名称'] = name
dict1['简介'] = intro
for i in range(len(dds)):
label = dds.eq(i)('i').eq(0).text()
s = dds.eq(i).text()
if s.endswith('[详细]'):
s = s[:-4].strip()
ss = [i.strip() for i in s.split(':')]
content = dds.eq(i)('a')
if content and ss[0] in ['典型症状','临床检查','并发症','手术','常用药品']:
ll = list()
for ii in range(len(content)):
if content.eq(ii).attr.title:
ll.append(content.eq(ii).attr.title)
dict1[ss[0]] = ll
else:
dict1[ss[0]] = ss[1]
drug = d('.drug > ul >li').eq(0)
if drug:
aa = drug('a')
if aa:
ll = list()
for i in range(len(aa)):
if aa.get(i).attr.title:
ll.append(aa.get(i).attr.title)
dict1[drug('i').text()[:-1]] = ll
return dict1 | mayi140611/crawl | requestProj/39jiankang/health39util.py | health39util.py | py | 2,018 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pyquery.PyQuery",
"line_number": 19,
"usage_type": "call"
}
] |
72550547235 | from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn.svm import SVC
# 파일 load
df = pd.read_excel('C:/BCI Data/ratio file/trainset588.xlsx')
x = df[df.columns[1,4]] # ratio_mu, ratio_theta, ratio_beta
y = df['ClickorNot'] # y label (0 or 1)
X_train, X_test, y_train, y_test = train_test_split(x, y , test_size = 0.2, random_state=123)
# 모델 학습
model = SVC(kernel='rbf', gamma=0.01).fit(X_train, y_train)
# 평가
print("훈련 세트 정확도: {:.2f}".format(model.score(X_train, y_train)))
print("테스트 세트 정확도: {:.2f}".format(model.score(X_test, y_test)))
| seungcholcho/gazetracker | src/SVM_Classifier.py | SVM_Classifier.py | py | 622 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_excel",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 14,
"usage_type": "call"
}
] |
74090940833 | import os
import sys
import leveldb
import logging
import fileinput
from wikiref.util import flush_dict_to_ldb
from wikiref.settings import LDB_ARRAY_DELIM
from wikiref.settings import INDEX_TAXONOMY_REL
from wikiref.settings import INDEX_YAGO_TSV_DELIM
from wikiref.settings import INDEX_YAGO_TAXONOMY_DIRNAME
logging.basicConfig(level=logging.INFO)
try:
_, yago_taxonomy_file, output_dir = sys.argv
except Exception:
logging.error("usage: %s <yago_taxonomy_file> <output_dir>" % __file__)
exit(1)
CUR_SIZE = 0
MAX_SIZE = 100000 * 128
LDB = leveldb.LevelDB(os.path.join(output_dir, INDEX_YAGO_TAXONOMY_DIRNAME))
input_fl = fileinput.input((
yago_taxonomy_file,
))
index_cache = dict()
for line in input_fl:
row = line.split(INDEX_YAGO_TSV_DELIM)
rel = row[2]
if rel != INDEX_TAXONOMY_REL:
continue
child_class = row[1]
parent_class = row[3]
if len(child_class) == 0 or len(parent_class) == 0:
continue
if child_class in index_cache:
index_cache[child_class].add(parent_class)
else:
index_cache[child_class] = {parent_class}
CUR_SIZE += 1
if CUR_SIZE > MAX_SIZE:
flush_dict_to_ldb(LDB, index_cache)
index_cache = dict()
CUR_SIZE = 0
flush_dict_to_ldb(LDB, index_cache)
input_fl.close()
logging.info("[DONE]") | zaycev/wikiref | scripts/run_index_taxonomy.py | run_index_taxonomy.py | py | 1,343 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "logging.error",
... |
2764211799 | # Importing necessary libraries...
import collections
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split
from math import *
class GaussianNaiveBayes:
total_prob = None
mean, variance, n_class = None, None, None
y_dict = None
n_features = None
this_class_prior = None
# calculate the prior distribution for each class in the label
# label has to be in ints starts from 0
def class_prior(self, y):
y_dict = collections.Counter(y)
n_class = len(y_dict)
this_class_prior = np.ones(n_class)
for i in range(n_class):
this_class_prior[i] = y_dict[i]/y.shape[0]
return this_class_prior, n_class, y_dict
# calculate the conditional mean and variance
def mean_variance(self,X, y):
n_features = X.shape[1]
m = np.ones((self.n_class, n_features))
v = np.ones((self.n_class, n_features))
xs = []
for c in range(self.n_class):
xs.append( np.array([X[i] for i in range(X.shape[0]) if y[i] == c]))
#xs = np.array(xs)
for c in range(self.n_class):
for j in range(n_features):
m[c][j] = np.mean(xs[c].T[j])
v[c][j] = np.var(xs[c].T[j], ddof=1)
return m, v, n_features # mean and variance
def prob_feature_class(self, x):
m = self.mean
v = self.variance
n_sample = x.shape[0]
pfc = np.ones(( n_sample, self.n_class))
for s in range(n_sample):
for i in range(self.n_class):
pfc[s][i] = np.prod([(1/sqrt(2*3.14*v[i][j])) * exp(-0.5* pow((x[s][j] - m[i][j]),2)/v[i][j]) for j in
range(self.n_features)])
return pfc
def fit(self,X, y):
self.this_class_prior, self.n_class, self.y_dict = self.class_prior(y)
self.mean, self.variance, self.n_features = self.mean_variance(X, y)
def predict(self, x):
n_sample = x.shape[0]
pfc = self.prob_feature_class(x)
pcf = np.ones(( n_sample, self.n_class))
for s in range(n_sample):
total_prob = 0
for i in range(self.n_class):
total_prob = total_prob + (pfc[s][i] * self.this_class_prior[i])
for i in range(self.n_class):
pcf[s][i] = (pfc[s][i] * self.this_class_prior[i])/total_prob
prediction = [int(pcf[s].argmax()) for s in range(n_sample)]
return prediction
if __name__=="__main__":
iris = datasets.load_iris()
print(iris.data[:5])
print(iris.target[:5])
X_train, X_test, y_train, y_test = train_test_split(
iris.data[iris.target < 2 ], iris.target[iris.target < 2], test_size=0.33)
naive_bayes = GaussianNaiveBayes()
naive_bayes.fit(X_train, y_train)
print(naive_bayes.mean)
print(naive_bayes.variance)
print(naive_bayes.this_class_prior)
prid, score = naive_bayes.predict(X_test)
print(prid)
print(score)
print(y_test)
from sklearn.metrics import average_precision_score
average_precision = average_precision_score(y_test, score[:,1])
print('Average precision-recall score: {0:0.2f}'.format(
average_precision))
| zhuyuecai/Comp551Group53 | Yuecai/integration/mlModels/naive_bayes.py | naive_bayes.py | py | 3,375 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.Counter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_numb... |
24000790808 | # reference 1 - https://www.youtube.com/watch?v=Dhc_fq5iCnU&list=PLpdmBGJ6ELULEfPWvvks0HtwzCvQo1zu0&index=5
# reference 2 - https://www.youtube.com/watch?v=9TxEQQyv9cE&list=PLpdmBGJ6ELULEfPWvvks0HtwzCvQo1zu0&index=8
# reference 3 - https://www.youtube.com/watch?v=MlK6SIjcjE8&t=322s
from llama_index import LangchainEmbedding
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LLMPredictor, ServiceContext, load_index_from_storage, StorageContext
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.storage.storage_context import SimpleVectorStore, SimpleIndexStore
import torch
from langchain.llms.base import LLM
from transformers import pipeline
from typing import Mapping, Any
import streamlit as st
st.title('Ramayana-GPT')
prompt = st.text_input('Ask your question here')
#@st.cache
class CustomLLM(LLM):
model_name = "google/flan-t5-xl"
device = "cpu"
model_kwargs = {"device": device}
pipeline = pipeline("text2text-generation", model=model_name, **model_kwargs)
def _call(self, prompt, stop=None):
return self.pipeline(prompt, max_length=9999)[0]["generated_text"]
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {"name_of_model": self.model_name}
def _llm_type(self):
return "custom"
#@st.cache
def return_index():
llm_predictor = LLMPredictor(llm=CustomLLM())
hfemb = HuggingFaceEmbeddings()
embed_model = LangchainEmbedding(hfemb)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, embed_model=embed_model)
storage_context = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir="index_saved"),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir="index_saved"),
index_store=SimpleIndexStore.from_persist_dir(persist_dir="index_saved"),
)
index = load_index_from_storage(storage_context, service_context=service_context)
return index
index = return_index()
query_engine = index.as_query_engine()
if prompt:
response = query_engine.query(prompt)
st.write(response)
# llm_predictor = LLMPredictor(llm=CustomLLM())
# hfemb = HuggingFaceEmbeddings()
# embed_model = LangchainEmbedding(hfemb)
# service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, embed_model=embed_model)
# storage_context = StorageContext.from_defaults(
# docstore=SimpleDocumentStore.from_persist_dir(persist_dir="index_saved"),
# vector_store=SimpleVectorStore.from_persist_dir(persist_dir="index_saved"),
# index_store=SimpleIndexStore.from_persist_dir(persist_dir="index_saved"),
# )
# index = load_index_from_storage(storage_context, service_context=service_context)
# query_engine = index.as_query_engine()
# if prompt:
# response = query_engine.query(prompt)
# st.write(response)
| hastinmodi/Ramayana_GPT | streamlit_app.py | streamlit_app.py | py | 2,898 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.title",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "streamlit.text_input",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "langchain.llms.base.LLM",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "transfor... |
17087660357 | from nornir import InitNornir
from nornir.plugins.tasks import networking, text
from nornir.plugins.functions.text import print_result
from nornir.core.filter import F
from nornir.core.task import Result
from ciscoconfparse import CiscoConfParse
from pathlib import Path
import ipdb
import re
import time
CONFIG_PATH = "nxos/"
TEMPLATES = f"{CONFIG_PATH}/templates/"
PRE_DEPLOY = "pre_deployment"
POST_DEPLOY = "post_deployment"
def show_interfaces(task):
"""
Nornir task that gets the output from "show ip int brief" to see if
routed interfaces and loopbacks are present and configured correctly.
Marks interfaces that have been altered from desired state.
If an interface has been marked then the interface configurations get
sent to the function interface_checker for further inspection.
Args:
task: nornir task object
"""
cmd = "show ip int brief"
int_brief = task.run(
networking.netmiko_send_command, command_string=cmd, use_genie=True
)
result = int_brief[0].result
# Check if zero interfaces configured
if type(result) == str:
changed = True
result = f"{task.host.name}: adding new interfaces"
# Check if interfaces are configured correctly
elif type(result) != str:
int_checker_result = task.run(task=interface_checker, int_config=result)
changed = int_checker_result.changed
result = int_checker_result.result
else:
changed = False
result = f"{task.host.name}: no interface changes"
return Result(host=task.host, result=result, changed=changed)
def interface_checker(task, int_config):
"""
Nornir task that compares the interface configuration against
the host file in inventory. This function checks to make sure
that the interface is:
- admin-up
- has the correct ip addresss
Args:
task: nornir task object
int_config: The output of "show ip int brief" in a dictionary
"""
changed = False
result = ""
for intf in task.host.data["interfaces"]:
check_interface = intf["int_name"]
if (
check_interface in int_config["interface"]
and "admin-up"
in int_config["interface"][check_interface]["interface_status"]
and intf["ip_address"]
== int_config["interface"][check_interface]["ip_address"]
):
pass
else:
changed = True
result = f"{task.host.name}: correcting 1 or more interface configuration"
return Result(host=task.host, changed=changed, result=result)
def render_int_config(task):
"""
Nornir task that renders a jinja template and produce a interface
configuration with values from inventory.
Args:
nornir task object
"""
task.run(
task=text.template_file, template="int.j2", path=TEMPLATES, **task.host
)
def config_interface(task):
"""
Nornir task that uses Naplam to configure the interfaces on the host device.
The rendered interface configurations are merged to the device.
Args:
task: nornir task object
"""
config = task.run(task=render_int_config)
config = config[-1].result
task.run(task=networking.napalm_configure, configuration=config)
def set_config_flags(task):
"""
Nornir task to ensure that config markers or "flags" are present in the
current configuration.
For prefix-lists, if a "PL_BGP_" isn't found in the output of "show ip prefix-list",
then a bogus prefix list will be created.
For route-maps, if a "RM_BGP_" isn't found in the output of "show route-maps",
then a bogus route-map will be created.
Args:
task: nornir task object
"""
show_prefix = task.run(
task=networking.netmiko_send_command,
command_string="show ip prefix-list | i PL_BGP_",
)
show_map = task.run(
task=networking.netmiko_send_command,
command_string="show route-map | i RM_BGP_",
)
config_prefix = "ip prefix-list PL_BGP_BOGUS permit 1.1.1.1/32"
config_map = "route-map RM_BGP_BOGUS"
# Create bogus prefix-list if none exist
if not show_prefix.result:
task.run(task=networking.netmiko_send_config, config_commands=config_prefix)
# Create bogus route-map if none exists
if not show_map.result:
task.run(task=networking.netmiko_send_config, config_commands=config_map)
# Add remaining base configs for bgp
bgp_base_config = task.run(
task=text.template_file, template="base_config.j2", path=TEMPLATES, **task.host
)
bgp_base_config = bgp_base_config.result
task.run(task=networking.napalm_configure, configuration=bgp_base_config)
def get_checkpoint(task):
"""
Nornir task that retreives the current checkpoint from the device.
Args:
task: nornir task object
"""
napalm_connect = task.host.get_connection("napalm", task.nornir.config)
checkpoint = napalm_connect._get_checkpoint_file()
task.host["checkpoint"] = checkpoint
def save_backup(task, config_type):
"""
Nornir task that takes the current checkpoint and saves it locally.
Args:
task: nornir task object
"""
Path(f"{CONFIG_PATH}backups").mkdir(parents=True, exist_ok=True)
with open(f"{CONFIG_PATH}backups/{task.host}_checkpoint_{config_type}", "w") as f:
f.write(task.host["checkpoint"])
def render_configs(task):
"""
Nornir task that uses jinja templating to renders configurations for:
- bgp configurations
- route-maps
- prefix-lists
Args:
task: nornir task object
"""
bgp_rendered = task.run(
task=text.template_file, template="bgp_config.j2", path=TEMPLATES, **task.host
)
prefix_rendered = task.run(
task=text.template_file, template="prefix_list.j2", path=TEMPLATES, **task.host
)
map_rendered = task.run(
task=text.template_file, template="route_map.j2", path=TEMPLATES, **task.host
)
task.host["bgp_rendered"] = bgp_rendered.result
task.host["prefix_rendered"] = prefix_rendered.result
task.host["map_rendered"] = map_rendered.result
def merge_configs(task, search_str, rendered_conf):
"""
Nornir task to search and replace any configurations that deviate from the
desired state. CiscoConfParse and used to find the config "flag"
and regex is used to update the config with rendered configurations.
Args:
serach_str: regex used to search configurations
rendered_conf: config to replace the current configurations
task: nornir task object
"""
parse = CiscoConfParse(
f"{CONFIG_PATH}backups/{task.host}_checkpoint_{PRE_DEPLOY}",
syntax="nxos",
factory=True,
)
parse_text = ""
changed = False
# Parse current config
for parent_OBJ in parse.find_objects(search_str):
parse_text += f"\n{parent_OBJ.parent.text}"
try:
for child_OBJ in parent_OBJ.all_children:
parse_text += f"\n{child_OBJ.text}"
except AttributeError:
pass
parse_text = parse_text.strip()
# Comapare current config and rendered configs
if parse_text == task.host[rendered_conf]:
pass
else:
updated_config = re.sub(
parse_text, task.host[rendered_conf], task.host["checkpoint"]
)
changed = True
task.host["checkpoint"] = updated_config
return Result(host=task.host, changed=changed)
def push_configs(task):
"""
Nornir task that deploys the updated config only if there were changes made.
Args:
task: nornir task object
"""
with open(f"{CONFIG_PATH}backups/{task.host}_checkpoint_{POST_DEPLOY}") as f:
cfg_file = f.read()
check_config = task.run(
task=networking.napalm_configure,
replace=True,
configuration=cfg_file,
dry_run=True,
)
# Check for changes before replacing config
if check_config[0].diff == "":
pass
else:
push_config = task.run(
task=networking.napalm_configure, replace=True, configuration=cfg_file
)
def validate_bgp(task):
"""
Nornir task that uses Napalm to retrieve the device output from
"show bgp ipv4 unicast neighbors" and determines if the peer is up"
Args:
task: nornir task object
"""
bgp_result = task.run(task=networking.napalm_get, getters=["bgp_neighbors"])
print("*" * 80)
for peer in task.host["bgp"]["neighbors"]:
bgp_peer = peer["remote_peer"]
if not bgp_result.result["bgp_neighbors"]["global"]["peers"][bgp_peer]["is_up"]:
print(f"Failed, BGP peer {bgp_peer} is not up...")
else:
print(f"Success, BGP peer {bgp_peer} is up!")
print("*" * 80)
print()
def main():
nr = InitNornir(config_file="config.yaml")
# nr = nr.filter(name="nxos1")
nr = nr.filter(F(groups__contains="nxos"))
# Configure interfaces
check_int_results = nr.run(task=show_interfaces)
for _, hosts in enumerate(nr.inventory.hosts):
if check_int_results[hosts].changed:
print(check_int_results[hosts].result)
config_int_results = nr.run(task=config_interface)
print_result(config_int_results)
else:
print_result(check_int_results)
# Set config flags
flag_results = nr.run(task=set_config_flags)
print_result(flag_results)
# Get checkpoint
checkpoint_results = nr.run(task=get_checkpoint)
print_result(checkpoint_results)
# Save config locally
pre_deploy_config = nr.run(task=save_backup, config_type=PRE_DEPLOY)
print_result(pre_deploy_config)
# Render configs
render_results = nr.run(task=render_configs)
print_result(render_results)
# Merge config
merge_dict = [
{"search": "RM_BGP\w+ permit|deny", "rendered": "map_rendered"},
{"search": "PL_BGP.* permit|deny", "rendered": "prefix_rendered"},
{"search": "router bgp 22", "rendered": "bgp_rendered"},
]
for parse in merge_dict:
merge_results = nr.run(
task=merge_configs,
search_str=parse["search"],
rendered_conf=parse["rendered"],
)
print_result(merge_results)
# Save updated config to disk
deploy_config_results = nr.run(task=save_backup, config_type=POST_DEPLOY)
print_result(deploy_config_results)
# Push configs
push_config_results = nr.run(task=push_configs)
print_result(push_config_results)
# Validate BGP
for i in range(5, 0, -1):
time.sleep(1)
nr.run(task=validate_bgp)
if __name__ == "__main__":
main()
| bfernando1/nornir-automation | week8/bgp_tool.py | bgp_tool.py | py | 10,822 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "nornir.plugins.tasks.networking.netmiko_send_command",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "nornir.plugins.tasks.networking",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "nornir.core.task.Result",
"line_number": 50,
"u... |
18030830671 | #%%
# Analysis example
import glob
import os
import sys
from importlib import reload
sys.path.append('/data/git_repositories_py/SCRAPC/')
sys.path.append('/data/git_repositories_py/fUS_pytools/')
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as signal
import scrapc_analysis as scana
import scrapc_io as scio
import scrapc_viz as scviz
import scrapc_npix as scnpix
import scrapc_imfilters as scfilters
from scipy.ndimage import gaussian_filter, median_filter
from scipy.ndimage.filters import median_filter
from scipy.stats import zscore
from matplotlib import cm
from matplotlib.colors import ListedColormap
from matplotlib.patches import Rectangle
import seaborn as sns
from PIL import Image
sns.set_style('white')
sns.set_style("ticks")
dir_aug26 = '/data/fUS_project/data/data_aug26/extras/'
dir_aug27 = '/data/fUS_project/data/data_aug27/extras/'
d1 = np.load(dir_aug26 + 'data_processed.npy')
d2 = np.load(dir_aug27 + 'data_processed.npy')
comb1 = np.load(dir_aug26 + 'interpolated_trial_average_aug26.npy')
comb2 = np.load(dir_aug27 + 'interpolated_trial_average_aug27.npy')
nX = 128
nY = 52
combined_full = np.zeros([1200, 4*52, 10*nX])
for i in range(10):
if np.mod(i, 2) == 0:
combined_full[:, :, nX*i:nX*(i+1)] = comb1[:, :, int((i/2))*nX : int(((i/2 + 1))*nX)]
else:
combined_full[:, :, nX*i:nX*(i+1)] = comb2[:, :, int(i/2)*nX : (int(i/2) + 1)*nX]
f0 = combined_full.mean(axis = 0)
for kk in range(1200):
combined_full[kk, :, :] = combined_full[kk, :, :] - f0;
# plt.figure(figsize = [12, 4])
# plt.imshow(combined_full[600, :52, 128*4:128*8])
# plt.grid(True)
#% Combined for each trial type
combined_resamp_stim = np.zeros([10, 80, 4*52, 10*nX])
for sl in range(10):
for cnt, stim_type in enumerate(['az_LR', 'az_RL', 'ev_UD', 'ev_DU']):
if np.mod(sl, 2) == 0:
tmp = d1[int(sl/2)]['trials_all'][stim_type]
else:
tmp = d2[int(sl/2)]['trials_all'][stim_type]
for trN in range(10):
tmp2 = tmp[trN*80:(trN+1)*80, :, :].copy();
f0 = tmp2[:6, :, :].mean(axis = 0)
tmp2 = (tmp2-f0)/f0 # DFF
combined_resamp_stim[trN, :, cnt*52:(cnt+1)*52, sl*128:(sl+1)*128] = tmp2
plt.figure(figsize = [20, 5])
plt.imshow(combined_resamp_stim.mean(axis=0)[40, :, :])
# Load anatimical
raw_fn = '/data/fUS_project/data/data_aug27/extras/RETINOTOPY_cumulative_trials_RAW_linear_interp_of_artifacts.tiff'
raw_data = scio.load_multitiff(raw_fn, first_n=100)
shapiro_out_dir = '/data/fUS_project/visualization/shapiro_lab_meeting'
#%% First make progressive grids
def gen_masks(step=8, n_masks=7, y_offset = 5, x_offset=5, im = np.zeros([52, 128]), vlim = [0, 1.5e10]):
# Generates N masks with size step spaced over a certain range
plt.imshow(im, vmin = vlim[0], vmax = vlim[1], cmap = 'binary'); #plt.colorbar();
plt.axis('off')
masks = []
colors = plt.cm.Reds(np.linspace(0.2,1,n_masks))
r = n_masks-1; # For computing Y spacing which is overlapping
for i in range(n_masks):
#reg_mask = np.zeros([52, 128])
y_start = int(y_offset + (r-i)*20/r)
y_end = int(y_start + step);
x_start = int(x_offset + i*step);
x_end = int(x_start + step)
curr_rect = Rectangle((x_start, y_start), step, step, fill = None, color = colors[i])
plt.gca().add_patch(curr_rect)
masks.append((y_start, y_end, x_start, x_end, curr_rect, colors[i]))
print(y_start, y_end, x_start, x_end)
return masks, colors
im_in = raw_data[:, 128:256, :].mean(axis=-1)
masks, colors = gen_masks(step=8, n_masks=7, y_offset = 5, x_offset=5, im = im_in, vlim =[0, 1.5e10])
#%% Now go through and for each one extract a timecourse from the first type of trial
n_masks = len(masks)
timecourse = np.zeros([n_masks, 10, 4, 1200])
for sl in range(10):
for exp in range(4):
for ii in range(n_masks):
y0, y1, x0, x1, _, c = masks[ii]
timecourse[ii, sl, exp, :] = combined_full[:, 52*exp+y0:52*exp+y1, 128*sl+x0:128*sl+x1].mean(-1).mean(-1)
# Plots
plt.figure(figsize = [20, 40])
for sl in range(10):
for exp in range(4):
plt.subplot(10,4,4*sl+exp+1)
for ii in range(n_masks):
plt.plot(timecourse[ii, sl, exp, :], color = colors[ii])
sns.despine()
plt.ylim([-0.2, 1])
#%% Now with the resamp at stim
comb_stim = combined_resamp_stim.mean(axis=0)
timecourse2 = np.zeros([n_masks, 10, 4, 80])
for sl in range(10):
for exp in range(4):
for ii in range(n_masks):
y0, y1, x0, x1, _, c = masks[ii]
timecourse2[ii, sl, exp, :] = comb_stim[:, 52*exp+y0:52*exp+y1, 128*sl+x0:128*sl+x1].mean(-1).mean(-1)
# Plots
plt.figure(figsize = [20, 40])
for sl in range(10):
for exp in range(4):
plt.subplot(10,4,4*sl+exp+1)
for ii in range(n_masks):
plt.plot(timecourse2[ii, sl, exp, :], color = colors[ii])
#%% Now with individual trials but only do this for a single slice.....
#%% TIMECOURSES OF RETINOTOPY
#######################################
#######################################
#######################################
from matplotlib.backends.backend_pdf import PdfPages
# One page per slice: per-trial, stimulus-aligned timecourses for each ROI,
# written to a multi-page PDF (shapiro_out_dir is defined earlier).
with PdfPages(shapiro_out_dir + '/slices_timecourse_from_retinotopy.pdf') as pdf_pointer:
    im_in = raw_data[:, 128:256, :].mean(axis=-1)
    masks, colors = gen_masks(step=8, n_masks=7, y_offset = 5, x_offset=5, im = im_in, vlim =[0, 1.5e10])
    pdf_pointer.savefig(plt.gcf())  # first page: the mask-layout figure
    for slice_no in range(10):
        # Keep trials separate (no mean over axis 0); cut this slice's x-range.
        comb_stim = combined_resamp_stim[:, :, :, 128*slice_no:128*(slice_no+1)]
        timecourse_trials = np.zeros([n_masks, 4, 10, 80]) # Now the 10 is nTrials instead of nSlices
        for exp in range(4):
            for ii in range(n_masks):
                y0, y1, x0, x1, _, c = masks[ii]
                tmp = comb_stim[:, :, 52*exp+y0:52*exp+y1, x0:x1].mean(axis=-1).mean(axis=-1)
                timecourse_trials[ii, exp, :, :] = tmp
        # # Plots
        plt.figure(figsize = [20, 10]); plt.suptitle('slice %d'% slice_no)
        for ii in range(n_masks):
            for exp in range(4):
                plt.subplot(4,n_masks, exp*n_masks+ii+1)
                # sns.tsplot draws mean +/- CI over trials.
                # NOTE(review): tsplot is deprecated/removed in newer seaborn
                # (replaced by lineplot) -- confirm pinned seaborn version.
                sns.tsplot(timecourse_trials[ii, exp, :, :], time = np.linspace(0, 40, 80), color = colors[ii])
                sns.despine(); plt.ylim([-0.2, 1])
                if exp == 3:
                    plt.xlabel('time (sec)')
                if ii == 0:
                    plt.ylabel('% signal change')
        pdf_pointer.savefig(plt.gcf(), facecolor= 'white') # or you can pass a Figure object to pdf.savefig
        plt.close()
#%%
###################################
####################################
#% FULL FIELD%
# NOW DO FULL FIELD ANALYSIS
data_fn = '/data/fUS_project/data/data_aug29/extras/combined_single_trial.npy'
data_full_fn = '/data/fUS_project/data/data_aug29/extras/data_processed_fullfield_aug29.npy'
test = np.load(data_fn)
d_FF = np.load(data_full_fn)  # per-slice dicts -- presumably allow_pickle data; verify
nSlices = 14
comb_stim = test
rep_im = d_FF[7]['data_raw_fix'].mean(0).T  # representative image from slice 7
masks, colors = gen_masks(step=8, n_masks=7, y_offset = 10, x_offset=10, im = rep_im, vlim =[0, 8e9])
#%%
# 40 frames per trial, 10 trials
#d_FF[0]['data_resample_at_stim'].reshape([40, 10, 52, 128])
timecourseFF = np.zeros([n_masks, nSlices, 10, 40])
for sl in range(nSlices):
    for ii in range(n_masks):
        y0, y1, x0, x1, _, c = masks[ii]
        tmp = d_FF[sl]['data_resample_at_stim'].reshape([10, 40, 52, 128])
        tmp = tmp[:, :, y0:y1, x0:x1].mean(-1).mean(-1)
        # dF/F: baseline f0 is the mean of the first 5 frames of each trial.
        f0 = tmp[:, :5].mean(-1)
        tmp = (tmp.T - f0)/f0
        timecourseFF[ii, sl, :, :] =tmp.T;
        # timecourseFF[ii, sl, :, :] = comb_stim[:, y0:y1, 128*sl+x0:128*sl+x1].mean(-1).mean(-1)
        # timecourseFF[ii, sl, :, :] = timecourseFF[ii, sl, :] - timecourseFF[ii, sl, :10].mean()
# # Plots: one row per slice, one column per mask.
plt.figure(figsize = [20, 40])
for sl in range(nSlices):
    for ii in range(n_masks):
        plt.subplot(14,7,7*sl+ii+1)
        sns.tsplot(timecourseFF[ii, sl, :, :], time = np.linspace(0, 20, 40), color = colors[ii])
        sns.despine(); plt.ylim([-0.2, 1])
        if sl == 13:
            plt.xlabel('time (sec)')
        if ii == 0:
            plt.ylabel('% signal change')
# Save the timecourse grid plus the mask-layout figure into one PDF.
with PdfPages(shapiro_out_dir + '/FULLFIELD_perslice_timecourse.pdf') as pdf_pointer:
    pdf_pointer.savefig(plt.gcf(), facecolor= 'white')
    plt.figure()
    masks, colors = gen_masks(step=8, n_masks=7, y_offset = 10, x_offset=10, im = rep_im, vlim =[0, 8e9])
    pdf_pointer.savefig(plt.gcf(), facecolor= 'white')
###################################3
#%% NOW DO OBJECTS VS SCRAMBLED
###################################3###################################3
###################################3###################################3
###################################3###################################3
data_fn = '/data/fUS_project/data/data_sep03/extras_fullfield/data_processed_fullfield_sep13.npy'
d_OS_FF = np.load(data_fn)
data_fn = '/data/fUS_project/data/data_sep03/extras_object_scram/data_processed_objects_scram_sep03.npy'
d_OS = np.load(data_fn)
# Representative image from slice 2 of the full-field session.
rep_im = d_OS_FF[2]['data_raw_fix'].mean(0).T
masks, colors = gen_masks(step=8, n_masks=7, y_offset = 10, x_offset=10, im = rep_im, vlim =[0, 8e9])
n_masks = 7
#%% Plot the full field
nSlices = 3
timecourseFF = np.zeros([n_masks, nSlices, 10, 40])
for sl in range(nSlices):
    for ii in range(n_masks):
        y0, y1, x0, x1, _, c = masks[ii]
        # Cut out first 30 seconds (60 frames at this sampling rate).
        tmp = d_OS_FF[sl]['data_resample_at_stim'][60:, :, :] # Make sure it is the one from obj-scram
        tmp = tmp.reshape([10, 40, 52, 128])
        tmp = tmp[:, :, y0:y1, x0:x1].mean(-1).mean(-1)
        # dF/F against the mean of the first 5 frames of each trial.
        f0 = tmp[:, :5].mean(-1)
        tmp = (tmp.T - f0)/f0
        timecourseFF[ii, sl, :, :] =tmp.T;
#% Plot the obj and scram conditions
# 80 frames per trial here: objects (first 40) then scrambled (last 40).
timecourseOS = np.zeros([n_masks, nSlices, 10, 80])
for sl in range(nSlices):
    for ii in range(n_masks):
        y0, y1, x0, x1, _, c = masks[ii]
        # NOTE(review): unlike the loop above, no frames are dropped here
        # despite the original "cut out first 30 seconds" comment -- confirm.
        tmp = d_OS[sl]['data_resample_at_stim'] # Make sure it is the one from obj-scram
        tmp = tmp.reshape([10, 80, 52, 128])
        tmp = tmp[:, :, y0:y1, x0:x1].mean(-1).mean(-1)
        f0 = tmp[:, :5].mean(-1)
        tmp = (tmp.T - f0)/f0
        timecourseOS[ii, sl, :, :] =tmp.T;
#%%
# Sweep the mask row across several y offsets and write obj/scram summary
# figures to a single PDF.
with PdfPages(shapiro_out_dir + '/OBJ_SCRAM_3slices_sweepMasks.pdf') as pdf_pointer:
    rep_im = d_OS_FF[2]['data_raw_fix'].mean(0).T
    for current_y in [3,6,9,12]:
        plt.figure()
        masks, colors = gen_masks(step=8, n_masks=7, y_offset = current_y, x_offset=10, im = rep_im, vlim =[0, 8e9])
        plt.title('Y offset: %d and size: %d' % (current_y, 8))
        pdf_pointer.savefig(plt.gcf(), facecolor= 'white')
    # # Plots -- full-field timecourses, one row per slice / column per mask.
    plt.figure(figsize = [20, 40])
    for sl in range(nSlices):
        for ii in range(n_masks):
            plt.subplot(14,7,7*sl+ii+1)
            sns.tsplot(timecourseFF[ii, sl, :, :], time = np.linspace(0, 20, 40), color = colors[ii])
            sns.despine(); plt.ylim([-0.2, 0.8])
            if sl == 2:
                plt.xlabel('time (sec)')
            if ii == 0:
                plt.ylabel('% signal change')
    #plt.suptitle('3 slices full field')
    pdf_pointer.savefig(plt.gcf(), facecolor= 'white')
    # # Plots -- full 80-frame object+scrambled trials in the mask colors.
    plt.figure(figsize = [20, 40])
    for sl in range(nSlices):
        for ii in range(n_masks):
            plt.subplot(14,7,7*sl+ii+1)
            sns.tsplot(timecourseOS[ii, sl, :, :], time = np.linspace(0, 40, 80), color = colors[ii])
            sns.despine(); plt.ylim([-0.2, 0.8])
            if sl == 2:
                plt.xlabel('time (sec)')
            if ii == 0:
                plt.ylabel('% signal change')
    #plt.suptitle('objects_v_scrambled')
    pdf_pointer.savefig(plt.gcf(), facecolor= 'white')
    # Objects (first 40 frames, dark red) vs scrambled (last 40, dark green).
    plt.figure(figsize = [20, 40])
    for sl in range(nSlices):
        for ii in range(n_masks):
            plt.subplot(14,7,7*sl+ii+1)
            sns.tsplot(timecourseOS[ii, sl, :, :40], time = np.linspace(0, 20, 40), color = [0.5, 0, 0])
            sns.tsplot(timecourseOS[ii, sl, :, 40:], time = np.linspace(0, 20, 40), color = [0, 0.5, 0])
            sns.despine(); plt.ylim([-0.2, 0.8])
            if sl == 2:
                plt.xlabel('time (sec)')
            if ii == 0:
                plt.ylabel('% signal change')
    pdf_pointer.savefig(plt.gcf(), facecolor= 'white')
    # NOTE(review): the figure below duplicates the one just saved and is
    # never written to the PDF -- looks like leftover/dead code; confirm
    # and remove.
    plt.figure(figsize = [20, 40])
    for sl in range(nSlices):
        for ii in range(n_masks):
            plt.subplot(14,7,7*sl+ii+1)
            sns.tsplot(timecourseOS[ii, sl, :, :40], time = np.linspace(0, 20, 40), color = [0.5, 0, 0])
            sns.tsplot(timecourseOS[ii, sl, :, 40:], time = np.linspace(0, 20, 40), color = [0, 0.5, 0])
            sns.despine(); plt.ylim([-0.2, 0.8])
            if sl == 2:
                plt.xlabel('time (sec)')
            if ii == 0:
                plt.ylabel('% signal change')
| fluongo/fUS_pyTools | analysis_scripts/09_12_19_Shapiro_lab_meeting.py | 09_12_19_Shapiro_lab_meeting.py | py | 13,271 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_numbe... |
682547229 | import sys
import pygame
from pygame import gfxdraw
from utils.mathhelper import clamp, is_inside_radius
from utils.curves import Bezier
class GUI:
    """Main pygame window: owns the screen, the play-area surface and shared
    input state.

    NOTE(review): nearly all state is assigned onto the GUI *class*
    (``GUI.screen``, ``GUI.mouse``, ``GUI.elements``, ...), so the class acts
    as a global singleton shared by every element subclass -- creating more
    than one GUI would clobber the first; confirm this is intended.
    """

    def __init__(self, width, height, offset_x=0, offset_y=0):
        pygame.init()
        self.offset_x = offset_x
        self.offset_y = offset_y
        # Window is the requested play area plus an offset border on each side.
        self.size = self.width, self.height = width + 2 * offset_x, height + 2 * offset_y
        GUI.screen = pygame.display.set_mode(self.size)
        GUI.play_area = pygame.Surface((width, height))
        GUI.mouse = pygame.mouse.get_pos()
        GUI.click = pygame.mouse.get_pressed()
        GUI.keys = pygame.key.get_pressed()
        GUI.clock = pygame.time.Clock()
        GUI.hitcircles = []  # drawables blitted onto the play area each frame
        GUI.elements = []    # UI widgets blitted onto the main screen
        GUI.cursor = Cursor((0, 0))
        # Per-key level/edge detectors, parallel to the pygame key array.
        GUI.is_holding_down_key = [0] * len(GUI.keys)
        GUI.is_single_press_key = [0] * len(GUI.keys)
        GUI.is_holding_down = False
        GUI.is_single_click = False
        GUI.single_press_events = []   # (keys, callback) fired on key-down edge
        GUI.holding_down_events = []   # (keys, callback) fired while keys held

    def mouse_events(self):
        """Refresh mouse state; derive single-click (edge) vs hold (level)."""
        GUI.mouse = pygame.mouse.get_pos()
        GUI.click = pygame.mouse.get_pressed()
        if GUI.click[0] == 1:
            # First frame of a press is a single click; afterwards a hold.
            if not GUI.is_holding_down:
                GUI.is_single_click = True
                GUI.is_holding_down = True
            else:
                GUI.is_single_click = False
        else:
            GUI.is_single_click = False
            GUI.is_holding_down = False

    def keyboard_events(self):
        """Same edge/level detection as mouse_events, per keyboard key."""
        GUI.keys = pygame.key.get_pressed()
        for i in range(len(GUI.keys)):
            if GUI.keys[i]:
                if not GUI.is_holding_down_key[i]:
                    GUI.is_single_press_key[i] = True
                    GUI.is_holding_down_key[i] = True
                else:
                    GUI.is_single_press_key[i] = False
            else:
                GUI.is_single_press_key[i] = False
                GUI.is_holding_down_key[i] = False

    def handle_key_events(self):
        """Fire registered callbacks whose full key combination is satisfied."""
        for keys, event in GUI.single_press_events:
            # for/else: event() runs only when no key in the combo failed.
            for key in keys:
                if not GUI.is_single_press_key[key]:
                    break
            else:
                event()
        for keys, event in GUI.holding_down_events:
            for key in keys:
                if not GUI.is_holding_down_key[key]:
                    break
            else:
                event()

    def add_single_press_event(self, keys, event):
        """Register `event` to fire once when all `keys` are newly pressed."""
        GUI.single_press_events.append((keys, event))

    def add_holding_down_event(self, keys, event):
        """Register `event` to fire every frame while all `keys` are held."""
        GUI.holding_down_events.append((keys, event))

    def draw(self):
        """Run one frame: poll input, dispatch key events, redraw everything."""
        self.mouse_events()
        self.keyboard_events()
        self.handle_key_events()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
        self.screen.fill((20, 20, 20))
        self.play_area.fill((0, 0, 0))
        for i in GUI.hitcircles:
            i.display()
        GUI.cursor.display()
        GUI.screen.blit(GUI.play_area, (self.offset_x, self.offset_y))
        for i in GUI.elements:
            i.display()
        pygame.display.flip()
class OSU(GUI):
    """Shared replay-playback state, stored on the class (singleton style)."""

    def __init__(self, current_frame, is_hardrock=False):
        # Mirror the constructor args onto the class so every hit-object
        # subclass sees the same current frame and mod flag.
        self.set_current_frame(current_frame)
        OSU.is_hardrock = is_hardrock

    def set_current_frame(self, current_frame):
        """Advance the replay frame shared by all OSU elements."""
        OSU.current_frame = current_frame
class Hitcircle(OSU):
    """A single osu! hit circle drawn onto the shared play area."""

    def __init__(self, x, y, time, radius=50, color=(255, 0, 0)):
        """Create a hit circle at osu! coordinates (x, y).

        Args:
            x, y: circle centre in the 512x384 osu! coordinate space.
            time: hit time in replay milliseconds.
            radius: circle radius in pixels.
            color: initial RGB fill color.
        """
        self.x = int(x)
        self.y = int(y)
        self.radius = int(radius)
        self.time = time
        self.color = color
        # Centre the 512x384 osu! coordinate space inside the play area.
        osu_width, osu_height = (512, 384)
        play_area_width, play_area_height = GUI.play_area.get_size()
        self.offset_width, self.offset_height = (
            (play_area_width - osu_width) // 2, (play_area_height - osu_height) // 2)
        GUI.hitcircles.append(self)

    def set_position(self, x, y):
        self.x = int(x)
        self.y = int(y)

    def set_color(self, color):
        self.color = color

    def cleanup(self):
        """Detach this circle from the global draw list.

        Fix: the original also executed ``del self``, which only unbinds the
        local name and never destroys the object -- removed as a misleading
        no-op.
        """
        GUI.hitcircles.remove(self)

    def set_time(self, i):
        self.time = i

    def set_current_time(self, i):
        # NOTE(review): current_time is never read in this class -- confirm
        # external use before removing.
        self.current_time = i

    def display(self):
        """Draw approach circle + hit circle while within 450 ms of hit time."""
        if not (0 < (self.time - OSU.current_frame.time) < 450):
            return
        # Green when the current replay frame's cursor is inside the circle.
        if is_inside_radius(
                (OSU.current_frame.x, OSU.current_frame.y), (self.x, self.y),
                self.radius):
            self.set_color((0, 255, 0))
        else:
            self.set_color((255, 0, 0))
        # Approach circle: shrinks toward the hit circle as time approaches.
        for i in range(2):  # <---- line width
            gfxdraw.aacircle(
                GUI.play_area,
                self.x + self.offset_width,
                self.y + self.offset_height,
                self.radius + i +
                int(min((self.time - OSU.current_frame.time) / 5, 90)),
                (255, 255, 255))
        # Hit circle: antialiased outline + fill, then a small centre dot.
        gfxdraw.aacircle(
            GUI.play_area,
            self.x + self.offset_width,
            self.y + self.offset_height,
            self.radius,
            self.color)
        gfxdraw.filled_circle(GUI.play_area, self.x + self.offset_width, self.y + self.offset_height,
                              self.radius, self.color)
        gfxdraw.aacircle(
            GUI.play_area,
            self.x + self.offset_width,
            self.y + self.offset_height,
            2,
            (255, 255, 255))
class Hitobject_Slider(OSU):
    """A slider hit object: endpoint circles plus a bezier body outline."""

    def __init__(self, control_points, circle_radius, time, duration,
                 show_control_points=False, color=(255, 0, 0)):
        """Create a slider from bezier control points.

        Args:
            control_points: bezier control points (objects with .x/.y).
            circle_radius: radius of the slider body / endpoint circles.
            time: hit time in replay milliseconds.
            duration: slider duration in milliseconds.
            show_control_points: also draw the raw control points.
            color: base RGB color.
        """
        self.control_points = control_points
        self.bezier = Bezier(control_points)
        self.circle_radius = circle_radius
        self.color = color
        self.time = time
        self.duration = duration
        self.show_control_points = show_control_points
        GUI.hitcircles.append(self)
        self.n = 0  # NOTE(review): never read in this class -- confirm external use

    def set_control_points(self, control_points):
        """Replace the slider path.

        Fix: previously only the bezier was rebuilt while ``control_points``
        kept its old value, so ``show_control_points`` drew stale markers;
        both are now updated together.
        """
        self.control_points = control_points
        self.bezier = Bezier(control_points)

    def display(self):
        """Draw the slider while active (approach window through duration)."""
        if not (0 - self.duration < (self.time - OSU.current_frame.time) < 450):
            return
        play_area_width, play_area_height = GUI.play_area.get_size()
        # offset between play area and GUI surface
        self.offset_width = (play_area_width - 512) // 2
        # offset between play area and GUI surface
        self.offset_height = (play_area_height - 384) // 2
        # Blue circles at the head, midpoint and tail of the sampled curve.
        for i in [self.bezier.pos[0], self.bezier.pos[len(
                self.bezier.pos) // 2], self.bezier.pos[-1]]:
            a = i
            gfxdraw.aacircle(
                GUI.play_area,
                int(a.x) + self.offset_width,
                int(a.y) + self.offset_height,
                int(self.circle_radius),
                (0, 0, 255))
        # Build two outlines by offsetting every 10th curve point
        # perpendicular to the local direction by circle_radius.
        l1 = []
        l2 = []
        for i in range(1, len(self.bezier.pos), 10):
            diffx = self.bezier.pos[i].x - self.bezier.pos[i - 1].x
            diffy = self.bezier.pos[i].y - self.bezier.pos[i - 1].y
            if diffx == 0:
                # Fix: a vertical segment used to raise ZeroDivisionError;
                # its perpendicular offset is purely horizontal (limit of the
                # formula below as slope -> infinity).
                b = 0.0
                a = self.circle_radius
            else:
                slope = diffy / diffx
                b = pow(pow(self.circle_radius, 2) / (pow(slope, 2) + 1), 0.5)
                a = -slope * b
            l1.append(
                (self.bezier.pos[i].x + a + self.offset_width,
                 self.bezier.pos[i].y + b + self.offset_height))
            l2.append(
                (self.bezier.pos[i].x - a + self.offset_width,
                 self.bezier.pos[i].y - b + self.offset_height))
        pygame.draw.aalines(
            GUI.play_area,
            pygame.Color("cyan"),
            False,
            l1,
            3)
        # Centre line of the slider body.
        pygame.draw.aalines(GUI.play_area, pygame.Color("gray"), False, [
            (i.x + self.offset_width, i.y + self.offset_height) for i in self.bezier.pos], 3)
        pygame.draw.aalines(
            GUI.play_area,
            pygame.Color("cyan"),
            False,
            l2,
            3)
        if self.show_control_points:
            for i in self.control_points:
                pygame.draw.circle(GUI.play_area, (255, 0, 0),
                                   (i.x, i.y), 5, 1)
class Cursor(GUI):
    """Replay cursor with optional trail and path overlays."""

    def __init__(self, position, trail_points=None,
                 path_points=None, show_path=False, show_markers=True):
        """Create a cursor at ``position`` (x, y).

        Fix: ``trail_points`` and ``path_points`` used mutable list defaults,
        which would be shared between every Cursor constructed without
        explicit lists; replaced with the None-sentinel idiom.

        NOTE(review): points passed here are stored raw, whereas the
        set_trail_points/set_path_points setters apply the play-area offset --
        confirm callers always use the setters.
        """
        self.x = int(position[0])
        self.y = int(position[1])
        self.trail_points = [] if trail_points is None else trail_points
        self.path_points = [] if path_points is None else path_points
        self.show_path = show_path
        self.show_markers = show_markers
        play_area_width, play_area_height = GUI.play_area.get_size()
        # offset between play area and GUI surface
        self.offset_width = (play_area_width - 512) // 2
        # offset between play area and GUI surface
        self.offset_height = (play_area_height - 384) // 2
        GUI.cursor = self

    def toggle_show_markers(self):
        """Flip drawing of the per-point X markers."""
        self.show_markers = not self.show_markers

    def set_cursor_position(self, x, y):
        self.x = int(x)
        self.y = int(y)

    def set_trail_points(self, trail_points):
        """Store trail points translated into play-area coordinates."""
        self.trail_points = [
            (point[0] + self.offset_width,
             point[1] + self.offset_height) for point in trail_points]

    def set_path_points(self, path_points):
        """Store path points translated into play-area coordinates."""
        self.path_points = [
            (point[0] + self.offset_width,
             point[1] + self.offset_height) for point in path_points]

    def display(self):
        """Draw the cursor circle, the trail/path polylines and X markers."""
        pygame.draw.circle(GUI.play_area, (0, 255, 255),
                           (self.x + self.offset_width, self.y + self.offset_height), 5, 2)
        if len(self.trail_points) > 1:
            pygame.draw.aalines(GUI.play_area, (0, 0, 255),
                                False, self.trail_points)
        if len(self.path_points) > 1 and self.show_path:
            pygame.draw.aalines(GUI.play_area, (255, 0, 255),
                                False, self.path_points)
        if self.show_markers:
            for i in self.trail_points:
                a = 3  # half-size of the X marker
                pygame.draw.line(GUI.play_area, (255, 255, 0),
                                 (i[0] + a, i[1] + a), (i[0] - a, i[1] - a))
                pygame.draw.line(GUI.play_area, (255, 255, 0),
                                 (i[0] - a, i[1] + a), (i[0] + a, i[1] - a))
            for i in self.path_points:
                a = 3
                pygame.draw.line(GUI.play_area, (255, 255, 255),
                                 (i[0] + a, i[1] + a), (i[0] - a, i[1] - a))
                pygame.draw.line(GUI.play_area, (255, 255, 255),
                                 (i[0] - a, i[1] + a), (i[0] + a, i[1] - a))
class Button(GUI):
    """Clickable rectangular button with a centred text label."""

    def __init__(self, x, y, width, height, text, on_click=None):
        """Create a button on the main screen.

        Fix: ``on_click`` previously defaulted to ``0``; since ``display``
        guards only with ``is not None``, clicking a default button called
        ``0()`` and raised TypeError. The default is now ``None`` and a
        click on such a button is a no-op.
        """
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.text = text
        self.on_click = on_click
        self.font = pygame.font.Font("NotoSans-Black.ttf", 15)
        GUI.elements.append(self)

    def set_text(self, text):
        self.text = text

    def display(self):
        """Draw the button (highlighted on hover) and fire on_click."""
        if (self.x + self.width >
                GUI.mouse[0] > self.x and self.y + self.height > GUI.mouse[1] > self.y):
            pygame.draw.rect(GUI.screen, (220, 220, 220),
                             (self.x, self.y, self.width, self.height))
            if GUI.is_single_click and self.on_click is not None:
                self.on_click()
        else:
            pygame.draw.rect(GUI.screen, (255, 255, 255),
                             (self.x, self.y, self.width, self.height))
        text_surface = self.font.render(self.text, True, (0, 0, 0))
        text_rect = text_surface.get_rect()
        text_rect.center = ((self.x + (self.width / 2)),
                            (self.y + (self.height / 2)))
        GUI.screen.blit(text_surface, text_rect)
class Slider(GUI):
    """Horizontal drag slider mapping screen position to [0, max_value]."""

    def __init__(self, x, y, width, height, value, max_value):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.value = value
        self.max_value = max_value
        self.drag_origin_x = 0
        self.drag_origin_y = 0
        self.is_dragging_ball = False
        self.is_setting_value = False  # NOTE(review): never read -- dead flag?
        GUI.elements.append(self)

    def get_value(self):
        """Current slider value in [0, max_value]."""
        return self.value

    def set_value(self, value):
        self.value = value

    def check_mouse_on_ball(self, x, y):
        # True when the cursor is within 7 px of the ball centre (x, y).
        diff = pow(x - GUI.mouse[0], 2) + pow(y - GUI.mouse[1], 2)
        return (pow(diff, 0.5) < 7)

    def check_mouse_on_slider(self):
        # NOTE(review): hit box extends width*2 / height*2 beyond (x, y),
        # i.e. wider than the drawn track -- confirm this is intentional.
        if self.x + self.width * \
                2 > GUI.mouse[0] > self.x and self.y + self.height * 2 > GUI.mouse[1] > self.y:
            return True
        else:
            return False

    def display(self):
        """Draw track + ball; handle click-to-set and drag interactions."""
        # Ball centre derived from the current value.
        circle_origin_x = self.x + \
            int(self.width * (self.value / self.max_value))
        circle_origin_y = self.y + int(self.height / 2)
        pygame.draw.rect(GUI.screen, (255, 255, 255),
                         (self.x, self.y, self.width, self.height))
        if self.check_mouse_on_ball(circle_origin_x, circle_origin_y):
            if GUI.is_single_click:
                # Start a drag gesture anchored at the click position.
                self.is_dragging_ball = True
                self.drag_origin_x = GUI.mouse[0]
                self.drag_origin_y = GUI.mouse[1]
        if self.is_dragging_ball and GUI.is_holding_down:
            # Follow the mouse while held, clamped to the track extent.
            circle_origin_x = GUI.mouse[0]
            circle_origin_x = clamp(
                circle_origin_x, self.x, self.x + self.width)
            self.value = (circle_origin_x - self.x) * \
                self.max_value / self.width
        else:
            self.is_dragging_ball = False
        if self.check_mouse_on_slider():
            if GUI.is_single_click:
                # Click on the bare track: jump the ball there and start drag.
                circle_origin_x = GUI.mouse[0]
                circle_origin_x = clamp(
                    circle_origin_x, self.x, self.x + self.width)
                self.value = (circle_origin_x - self.x) * \
                    self.max_value / self.width
                self.is_dragging_ball = True
        # Ball grows by 1 px while dragged; drawn hollow unless dragging.
        ball_size = 8 if self.is_dragging_ball else 7
        gfxdraw.aacircle(GUI.screen, circle_origin_x,
                         circle_origin_y, ball_size, (0, 255, 255))
        gfxdraw.filled_circle(GUI.screen, circle_origin_x,
                              circle_origin_y, ball_size, (0, 255, 255))
        gfxdraw.aacircle(GUI.screen, circle_origin_x,
                         circle_origin_y, ball_size - 2, (0, 0, 0))
        if self.is_dragging_ball:
            gfxdraw.filled_circle(GUI.screen, circle_origin_x,
                                  circle_origin_y, ball_size - 2, (0, 0, 0))
class TextBox(GUI):
    """Static white text label centred in a rectangular region."""

    def __init__(self, x, y, width, height, text):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.text = text
        self.font_size = 10
        self.font = pygame.font.Font("NotoSans-Black.ttf", self.font_size)
        GUI.elements.append(self)

    def set_text(self, text):
        """Replace the displayed string."""
        self.text = text

    def clear(self):
        """Blank out the label."""
        self.text = ""

    def display(self):
        """Render the current text centred within the box."""
        surface = self.font.render(self.text, True, (255, 255, 255))
        bounds = surface.get_rect()
        bounds.center = (self.x + self.width / 2, self.y + self.height / 2)
        GUI.screen.blit(surface, bounds)
class DebugBox(GUI):
    """Bordered panel listing debug strings top-to-bottom."""

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.texts = []
        self.font_size = 10
        self.font = pygame.font.Font("NotoSans-Black.ttf", self.font_size)
        GUI.elements.append(self)

    def add_text(self, text):
        """Append one line to the panel."""
        self.texts.append(text)

    def clear(self):
        """Drop all lines."""
        self.texts = []

    def display(self):
        """Draw the border, then each line centred horizontally and stacked
        vertically one font-height apart."""
        pygame.draw.rect(GUI.screen, (255, 255, 255),
                         (self.x, self.y, self.width, self.height), 1)
        for row, line in enumerate(self.texts):
            rendered = self.font.render(line, True, (255, 255, 255))
            box = rendered.get_rect()
            box.center = (self.x + self.width / 2,
                          self.y + 10 + row * self.font_size)
            GUI.screen.blit(rendered, box)
| burkap/osu-replay-analyzer | utils/gui.py | gui.py | py | 16,562 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surfac... |
37202277848 | """
0x Web3 Utilities
author: officialcryptomaster@gmail.com
"""
from decimal import Decimal
from enum import Enum
from typing import Optional, Union
from eth_utils import keccak, to_checksum_address
from hexbytes import HexBytes
from zero_ex.json_schemas import assert_valid
from zero_ex.contract_artifacts import abi_by_name
from utils.miscutils import (
try_, assert_like_integer, now_epoch_msecs,
epoch_secs_to_local_time_str, epoch_msecs_to_local_time_str)
from utils.web3utils import (
Web3Client, TxParams, NetworkId,
get_clean_address_or_throw, NULL_ADDRESS)
# Deployed 0x v2 contract addresses keyed by network.
# NOTE(review): verify against the official 0x deployment registry before
# changing any entry -- a wrong address silently targets the wrong contract.
ZX_CONTRACT_ADDRESSES = {
    NetworkId.MAINNET: {
        "exchange": "0x4f833a24e1f95d70f028921e27040ca56e09ab0b",
        "erc20_proxy": "0x2240dab907db71e64d3e0dba4800c83b5c502d4e",
        "erc721_proxy": "0x208e41fb445f1bb1b6780d58356e81405f3e6127",
        "asset_proxy_owner": "0x17992e4ffb22730138e4b62aaa6367fa9d3699a6",
        "forwarder": "0x5468a1dc173652ee28d249c271fa9933144746b1",
        "order_validator": "0x9463e518dea6810309563c81d5266c1b1d149138",
        "zrx_token": "0xe41d2489571d322189246dafa5ebde1f4699f498",
        "ether_token": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
    },
    NetworkId.ROPSTEN: {
        "exchange": "0x4530c0483a1633c7a1c97d2c53721caff2caaaaf",
        "erc20_proxy": "0xb1408f4c245a23c31b98d2c626777d4c0d766caa",
        "erc721_proxy": "0xe654aac058bfbf9f83fcaee7793311dd82f6ddb4",
        "asset_proxy_owner": "0xf5fa5b5fed2727a0e44ac67f6772e97977aa358b",
        "forwarder": "0x2240dab907db71e64d3e0dba4800c83b5c502d4e",
        "order_validator": "0x90431a90516ab49af23a0530e04e8c7836e7122f",
        "zrx_token": "0xff67881f8d12f372d91baae9752eb3631ff0ed00",
        "ether_token": "0xc778417e063141139fce010982780140aa0cd5ab",
    },
    NetworkId.RINKEBY: {
        "exchange": "0xbce0b5f6eb618c565c3e5f5cd69652bbc279f44e",
        "erc20_proxy": "0x2f5ae4f6106e89b4147651688a92256885c5f410",
        "erc721_proxy": "0x7656d773e11ff7383a14dcf09a9c50990481cd10",
        "asset_proxy_owner": "0xe1703da878afcebff5b7624a826902af475b9c03",
        "forwarder": "0x2d40589abbdee84961f3a7656b9af7adb0ee5ab4",
        "order_validator": "0x0c5173a51e26b29d6126c686756fb9fbef71f762",
        "zrx_token": "0x8080c7e4b81ecf23aa6f877cfbfd9b0c228c6ffa",
        "ether_token": "0xc778417e063141139fce010982780140aa0cd5ab",
    },
    NetworkId.KOVAN: {
        "exchange": "0x35dd2932454449b14cee11a94d3674a936d5d7b2",
        "erc20_proxy": "0xf1ec01d6236d3cd881a0bf0130ea25fe4234003e",
        "erc721_proxy": "0x2a9127c745688a165106c11cd4d647d2220af821",
        "asset_proxy_owner": "0x2c824d2882baa668e0d5202b1e7f2922278703f8",
        "forwarder": "0x17992e4ffb22730138e4b62aaa6367fa9d3699a6",
        "order_validator": "0xb389da3d204b412df2f75c6afb3d0a7ce0bc283d",
        "zrx_token": "0x2002d3812f58e35f0ea1ffbf80a75a38c32175fa",
        "ether_token": "0xd0a1e359811322d97991e03f863a0c30c2cf029c",
    },
    NetworkId.GANACHE: {
        "exchange": "0x48bacb9266a570d521063ef5dd96e61686dbe788",
        "erc20_proxy": "0x1dc4c1cefef38a777b15aa20260a54e584b16c48",
        "erc721_proxy": "0x1d7022f5b17d2f8b695918fb48fa1089c9f85401",
        "asset_proxy_owner": "0x34d402f14d58e001d8efbe6585051bf9706aa064",
        "forwarder": "0xb69e673309512a9d726f87304c6984054f87a93b",
        "order_validator": "0xe86bb98fcf9bff3512c74589b78fb168200cc546",
        "zrx_token": "0x871dd7c2b4b25e1aa18728e9d5f2af4c4e431f5c",
        "ether_token": "0x0b1ba0af832d7c05fd64161e0db78e85978e8082",
    },
}
# EIP-191 version byte pair prefixing EIP-712 structured-data hashes.
EIP191_HEADER = b"\x19\x01"
# 4-byte asset proxy ids used as prefixes of 0x assetData fields.
ERC20_PROXY_ID = '0xf47261b0'
ERC721_PROXY_ID = '0x02571792'
# keccak256 of the EIP-712 domain schema string.
EIP712_DOMAIN_SEPARATOR_SCHEMA_HASH = keccak(
    b"EIP712Domain(string name,string version,address verifyingContract)"
)
# keccak256 of the canonical 0x v2 Order struct schema (field order matters
# for hashing -- must match the on-chain Exchange contract exactly).
EIP712_ORDER_SCHEMA_HASH = keccak(
    b"Order("
    + b"address makerAddress,"
    + b"address takerAddress,"
    + b"address feeRecipientAddress,"
    + b"address senderAddress,"
    + b"uint256 makerAssetAmount,"
    + b"uint256 takerAssetAmount,"
    + b"uint256 makerFee,"
    + b"uint256 takerFee,"
    + b"uint256 expirationTimeSeconds,"
    + b"uint256 salt,"
    + b"bytes makerAssetData,"
    + b"bytes takerAssetData"
    + b")"
)
# Domain struct header: schema hash + keccak(name="0x Protocol") +
# keccak(version="2"); the verifying contract address is appended elsewhere.
EIP712_DOMAIN_STRUCT_HEADER = (
    EIP712_DOMAIN_SEPARATOR_SCHEMA_HASH
    + keccak(b"0x Protocol")
    + keccak(b"2")
)
class ZxOrderStatus(Enum):
    """OrderStatus codes used by 0x contracts.

    Values mirror the on-chain Exchange enum -- do not renumber.
    """
    INVALID = 0  # Default value
    INVALID_MAKER_ASSET_AMOUNT = 1  # Order does not have a valid maker asset amount
    INVALID_TAKER_ASSET_AMOUNT = 2  # Order does not have a valid taker asset amount
    FILLABLE = 3  # Order is fillable
    EXPIRED = 4  # Order has already expired
    FULLY_FILLED = 5  # Order is fully filled
    CANCELLED = 6  # Order has been cancelled
class ZxOrderInfo:  # pylint: disable=too-few-public-methods
    """A Web3-compatible representation of the `Exchange.OrderInfo`."""

    __name__ = "OrderInfo"

    def __init__(self, zx_order_status, order_hash,
                 order_taker_asset_filled_amount):
        """Create an instance of Exchange.OrderInfo struct.

        Args:
            zx_order_status (:class:`ZxOrderStatus`): order status
            order_hash (:class:`HexBytes` or bytes): order hash
            order_taker_asset_filled_amount (int): order taker asset filled
                amount
        """
        # Coerce each field to its canonical in-memory type.
        self.order_hash = HexBytes(order_hash)
        self.order_taker_asset_filled_amount = int(order_taker_asset_filled_amount)
        self.zx_order_status = ZxOrderStatus(zx_order_status)

    def __str__(self):
        status = try_(ZxOrderStatus, self.zx_order_status)
        return "[{}]({}, {}, filled_amount={})".format(
            self.__name__, status, self.order_hash.hex(),
            self.order_taker_asset_filled_amount)

    __repr__ = __str__
class ZxSignedOrder: # pylint: disable=too-many-public-methods
"""0x Signed Order model
This object will keep the database-friendly formats in member variables
starting with underscore, and provide property setter and getters for
getting the values in useful formats
"""
__name__ = "ZxSignedOrder"
def __init__(self, **kwargs):
"""Create an instance of ZxSignedOrder."""
# initialize all serialized (i.e. DB storable) columns as members
# ending with underscore.
self.hash_ = None
self.maker_address_ = None
self.taker_address_ = None
self.fee_recipient_address_ = None
self.sender_address_ = None
self.exchange_address_ = None
self.maker_asset_amount_ = None
self.taker_asset_amount_ = None
self.maker_fee_ = None
self.taker_fee_ = None
self.salt_ = None
self.expiration_time_seconds_ = None
self.maker_asset_data_ = None
self.taker_asset_data_ = None
self.signature_ = None
# book-keeping fields
self.created_at_msecs_ = None
self.bid_price_ = None
self.ask_price_ = None
self.sort_price_ = None
# assign keyword args and default values
self._created_at_msecs_ = kwargs.get("created_at_msecs") or now_epoch_msecs()
self.hash_ = kwargs.get("hash") or None
self.maker_address = kwargs.get("maker_address") or NULL_ADDRESS
self.taker_address = kwargs.get("taker_address") or NULL_ADDRESS
self.fee_recipient_address = kwargs.get("fee_recipient_address") or NULL_ADDRESS
self.sender_address = kwargs.get("sender_address") or NULL_ADDRESS
self.exchange_address = kwargs.get("exchange_address") or NULL_ADDRESS
self.maker_asset_amount = kwargs.get("maker_asset_amount") or "0"
self.taker_asset_amount = kwargs.get("taker_asset_amount") or "0"
self.taker_fee = kwargs.get("taker_fee") or "0"
self.maker_fee = kwargs.get("maker_fee") or "0"
self.salt = kwargs.get("salt") or "0"
# default expiry to one minute after creation
self.expiration_time_seconds = kwargs.get("expiration_time_seconds") \
or self._created_at_msecs_ / 1000. + 60
self.maker_asset_data = kwargs.get("maker_asset_data") or None
self.taker_asset_data = kwargs.get("taker_asset_data") or None
self.signature_ = kwargs.get("signature") or None
    def __str__(self):
        """One-line human-readable dump of every order field."""
        return (
            f"[{self.__name__}]"
            f"(hash={self.hash}"
            f", maker_address={self.maker_address}"
            f", taker_address={self.taker_address}"
            f", fee_recipient_address={self.fee_recipient_address}"
            f", sender_address={self.sender_address}"
            f", exchange_address={self.exchange_address}"
            f", maker_asset_amount={self.maker_asset_amount}"
            f", taker_asset_amount={self.taker_asset_amount}"
            f", maker_fee={self.maker_fee}"
            f", taker_fee={self.taker_fee}"
            f", salt={self.salt}"
            f", maker_asset_data={self.maker_asset_data}"
            f", taker_asset_data={self.taker_asset_data}"
            f", expires={self.expiration_time}"
            f", signature={self.signature}"
            ")"
        )

    # repr mirrors str for convenient logging of order collections.
    __repr__ = __str__
    @property
    def hash(self):
        """Get hash of the order with lazy evaluation.

        try_ swallows failures (e.g. an incomplete order), leaving hash_ None.
        """
        if self.hash_ is None:
            try_(self.update_hash)
        return self.hash_

    @property
    def maker_address(self):
        """Get maker address as hex string."""
        return self.maker_address_

    @maker_address.setter
    def maker_address(self, value):
        """Set maker address with validation.

        Args:
            value: hex string of maker address (checksum-validated on
                assignment); None clears the field.
        """
        self.maker_address_ = None if value is None else get_clean_address_or_throw(value)

    @property
    def taker_address(self):
        """Get taker address as hex string."""
        return self.taker_address_

    @taker_address.setter
    def taker_address(self, value):
        """Set taker address with validation.

        Args:
            value: hex string of taker address; None clears the field.
        """
        self.taker_address_ = None if value is None else get_clean_address_or_throw(value)

    @property
    def fee_recipient_address(self):
        """Get fee recipient address as hex string."""
        return self.fee_recipient_address_

    @fee_recipient_address.setter
    def fee_recipient_address(self, value):
        """Set fee recipient address with validation.

        Args:
            value: hex string of fee recipient address; None clears the field.
        """
        self.fee_recipient_address_ = None if value is None else get_clean_address_or_throw(value)

    @property
    def sender_address(self):
        """Get sender address as hex string."""
        return self.sender_address_

    @sender_address.setter
    def sender_address(self, value):
        """Set sender address with validation.

        Args:
            value: hex string of sender address; None clears the field.
        """
        self.sender_address_ = None if value is None else get_clean_address_or_throw(value)

    @property
    def exchange_address(self):
        """Get exchange address as hex string."""
        return self.exchange_address_

    @exchange_address.setter
    def exchange_address(self, value):
        """Set exchange address with validation.

        Args:
            value: hex string of exchange contract address; None clears the
                field.
        """
        self.exchange_address_ = None if value is None else get_clean_address_or_throw(value)
    @property
    def maker_asset_amount(self):
        """Get maker asset amount as integer in base units."""
        return int(self.maker_asset_amount_)

    @maker_asset_amount.setter
    def maker_asset_amount(self, value):
        """Set maker asset amount in base units.

        Args:
            value: integer-like maker asset amount in base units.
        """
        assert_like_integer(value)
        # Stored as a string so DB columns round-trip without precision loss.
        self.maker_asset_amount_ = "{:.0f}".format(Decimal(value))
        # Amounts define both sides of the price -> refresh cached quotes.
        self.update_bid_price()
        self.update_ask_price()

    @property
    def taker_asset_amount(self):
        """Get taker asset amount as integer in base units."""
        return int(self.taker_asset_amount_)

    @taker_asset_amount.setter
    def taker_asset_amount(self, value):
        """Set taker asset amount in base units.

        Args:
            value: integer-like taker asset amount in base units.
        """
        assert_like_integer(value)
        self.taker_asset_amount_ = "{:.0f}".format(Decimal(value))
        self.update_bid_price()
        self.update_ask_price()

    @property
    def maker_fee(self):
        """Get maker fee as integer in base units."""
        return int(self.maker_fee_)

    @maker_fee.setter
    def maker_fee(self, value):
        """Set maker fee in base units.

        Args:
            value: integer-like maker fee in base units.
        """
        assert_like_integer(value)
        self.maker_fee_ = "{:.0f}".format(Decimal(value))

    @property
    def taker_fee(self):
        """Get taker fee as integer in base units."""
        return int(self.taker_fee_)

    @taker_fee.setter
    def taker_fee(self, value):
        """Set taker fee in base units.

        Args:
            value: integer-like taker fee in base units.
        """
        assert_like_integer(value)
        self.taker_fee_ = "{:.0f}".format(Decimal(value))

    @property
    def salt(self):
        """Get salt as integer."""
        return int(self.salt_)

    @salt.setter
    def salt(self, value):
        """Set salt from integer-like.

        Args:
            value: integer-like salt value.
        """
        assert_like_integer(value)
        self.salt_ = "{:.0f}".format(Decimal(value))
@property
def expiration_time(self):
"""Get expiration as naive datetime."""
return try_(epoch_secs_to_local_time_str, self.expiration_time_seconds_)
@property
def expiration_time_seconds(self):
"""Get expiration time in seconds since epoch."""
return self.expiration_time_seconds_
@expiration_time_seconds.setter
def expiration_time_seconds(self, value):
"""Set expiration time secs from numeric-like.
Keyword argument:
value -- numeric-like expiration time in seconds since epoch
"""
self.expiration_time_seconds_ = int("{:.0f}".format(Decimal(value)))
@property
def maker_asset_data(self):
"""Get asset data as HexBytes."""
return self.maker_asset_data_
@maker_asset_data.setter
def maker_asset_data(self, value):
    """Set maker asset data.

    Keyword argument:
    value -- hexbytes-like maker asset data; stored as a hex string
        (``None`` is kept as ``None``)
    """
    self.maker_asset_data_ = HexBytes(value).hex() if value is not None else None
@property
def taker_asset_data(self):
    """Taker asset data exactly as stored (a hex string, or ``None``)."""
    stored = self.taker_asset_data_
    return stored
@taker_asset_data.setter
def taker_asset_data(self, value):
    """Set taker asset data.

    Keyword argument:
    value -- hexbytes-like taker asset data; stored as a hex string
        (``None`` is kept as ``None``)
    """
    self.taker_asset_data_ = HexBytes(value).hex() if value is not None else None
@property
def signature(self):
    """Return the signature of the SignedOrder (hex string, or ``None``)."""
    return self.signature_
@signature.setter
def signature(self, value):
    """Set the signature (stored as a hex string; ``None`` stays ``None``)."""
    self.signature_ = HexBytes(value).hex() if value is not None else None
@property
def created_at_msecs(self):
    """Creation time, in milliseconds since the Unix epoch, as stored."""
    msecs = self.created_at_msecs_
    return msecs
@property
def created_at(self):
    """Get the creation timestamp converted to local time.

    NOTE(review): the original doc promises a naive :class:`DateTime`, but
    this delegates to `epoch_msecs_to_local_time_str`, whose name suggests
    a string -- confirm the intended return type.
    """
    return try_(epoch_msecs_to_local_time_str, self.created_at_msecs_)
@property
def bid_price(self):
    """Get bid price as a Decimal (0 when the cached value is unset/invalid)."""
    return try_(Decimal, self.bid_price_, default_=Decimal(0))
@property
def ask_price(self):
    """Get ask price as a Decimal.

    Falls back to a very large all-nines sentinel, effectively meaning
    "no meaningful ask" (sorts after every real ask).
    """
    return try_(Decimal, self.ask_price_, default_=Decimal("9" * 32))
@property
def sort_price(self):
    """Get sort price.

    This is useful for full set orders which result in a mix of bids and
    asks (hint: make use of `set_bid_as_sort_price` and its counterpart
    `set_ask_as_sort_price`)
    """
    return Decimal(self.sort_price_)
def update_hash(self):
    """Recompute the order hash from the JSON form and return ``self``."""
    self.hash_ = self.get_order_hash(self.to_json())
    return self
def update(self):
    """Run every derived-field update (currently just the hash); return ``self``."""
    self.update_hash()
    return self
def update_bid_price(self):
    """Recompute the cached bid price string and return ``self``.

    The bid price is the price of the taker asset per unit of maker asset
    (i.e. the price of the taker asset which the maker is bidding to buy).
    On any computation failure (zero maker amount, unset fields,
    non-numeric values) the cached price falls back to all zeros.

    Returns:
        self, to allow call chaining.
    """
    try:
        self.bid_price_ = "{:032.18f}".format(
            Decimal(self.taker_asset_amount) / Decimal(self.maker_asset_amount))
    except Exception:  # narrowed from bare except: let KeyboardInterrupt/SystemExit propagate
        self.bid_price_ = "0" * 32
    return self
def update_ask_price(self):
    """Recompute the cached ask price string and return ``self``.

    The ask price is the price of the maker asset per unit of taker asset
    (i.e. the price the maker is asking to sell the maker asset at).
    On any computation failure the cached price falls back to the all-nines
    sentinel (effectively "no ask").

    Returns:
        self, to allow call chaining.
    """
    try:
        self.ask_price_ = "{:032.18f}".format(
            Decimal(self.maker_asset_amount) / Decimal(self.taker_asset_amount))
    except Exception:  # narrowed from bare except: let KeyboardInterrupt/SystemExit propagate
        self.ask_price_ = "9" * 32
    return self
def set_bid_as_sort_price(self):
    """Copy the cached bid price into ``sort_price_`` and return ``self``.

    Handy when sorting full-set orders as bids.
    """
    self.sort_price_ = self.bid_price_
    return self
def set_ask_as_sort_price(self):
    """Copy the cached ask price into ``sort_price_`` and return ``self``.

    Handy when sorting full-set orders as asks.
    """
    self.sort_price_ = self.ask_price_
    return self
def to_json(
    self,
    include_hash=False,
    include_signature=True,
    include_exchange_address=None,
    for_web3=False,
):
    """Get a json representation of the SignedOrder.

    Args:
        include_hash (bool): whether to include the hash field
            (default: False)
        include_signature (bool): whether to include the signature
            (default: True)
        include_exchange_address (bool): whether to include the
            exchange_address field (default: None, which means it is set to
            False for web3 and set to True for the non-web3 use case)
        for_web3 (bool): whether the value types should be changed
            for calling 0x contracts through the web3 library (default:
            False)
    """
    if for_web3:
        # web3 wants native ints and byte types; the exchange address is
        # omitted by default because it is not part of the Order struct.
        if include_exchange_address is None:
            include_exchange_address = False
        order = {
            "makerAddress": to_checksum_address(self.maker_address_),
            "takerAddress": to_checksum_address(self.taker_address_),
            "feeRecipientAddress": to_checksum_address(self.fee_recipient_address_),
            "senderAddress": to_checksum_address(self.sender_address_),
            "makerAssetAmount": int(self.maker_asset_amount_),
            "takerAssetAmount": int(self.taker_asset_amount_),
            "makerFee": int(self.maker_fee_),
            "takerFee": int(self.taker_fee_),
            "salt": int(self.salt_),
            "expirationTimeSeconds": int(self.expiration_time_seconds_),
            "makerAssetData": HexBytes(self.maker_asset_data_),
            "takerAssetData": HexBytes(self.taker_asset_data_),
        }
        if include_hash:
            order["hash"] = HexBytes(self.hash)
        if include_signature:
            order["signature"] = HexBytes(self.signature)
        if include_exchange_address:
            # NOTE(review): other addresses above are checksummed but this
            # one is wrapped in HexBytes -- confirm the asymmetry is wanted.
            order["exchangeAddress"] = HexBytes(self.exchange_address_)
    else:
        # Plain JSON form keeps the stored string representations.
        if include_exchange_address is None:
            include_exchange_address = True
        order = {
            "makerAddress": self.maker_address_,
            "takerAddress": self.taker_address_,
            "feeRecipientAddress": self.fee_recipient_address_,
            "senderAddress": self.sender_address_,
            "makerAssetAmount": self.maker_asset_amount_,
            "takerAssetAmount": self.taker_asset_amount_,
            "makerFee": self.maker_fee_,
            "takerFee": self.taker_fee_,
            "salt": self.salt_,
            "expirationTimeSeconds": self.expiration_time_seconds_,
            "makerAssetData": self.maker_asset_data_,
            "takerAssetData": self.taker_asset_data_,
        }
        if include_hash:
            order["hash"] = self.hash
        if include_signature:
            order["signature"] = self.signature_
        if include_exchange_address:
            order["exchangeAddress"] = self.exchange_address_
    return order
@classmethod
def get_order_hash(cls, order_json):
    """Returns hex string hash of 0x order.

    Follows the EIP-712 structured-data scheme (per the EIP712_*/EIP191
    constants used below): the domain separator is hashed with the exchange
    address, the order struct is hashed field by field, and both are
    combined under the EIP-191 prefix.

    Args:
        order_json (dict): a dict conforming to "/signedOrderSchema"
            or "/orderSchema" (depending on whether `include_signature`
            is set to True or False) schemas can be found
            `here <https://github.com/0xProject/0x-monorepo/tree/development/
            packages/json-schemas/schemas>`__

    Returns:
        (str) order hash
    """
    order = order_json
    # Domain separator: header + exchange address left-padded to 32 bytes.
    eip712_domain_struct_hash = keccak(
        EIP712_DOMAIN_STRUCT_HEADER
        + HexBytes(order["exchangeAddress"]).rjust(32, b"\0")
    )
    # Order struct hash: each field is encoded as a 32-byte word; the
    # dynamic byte fields (asset data) are keccak-hashed first.
    eip712_order_struct_hash = keccak(
        EIP712_ORDER_SCHEMA_HASH
        + HexBytes(order["makerAddress"]).rjust(32, b"\0")
        + HexBytes(order["takerAddress"]).rjust(32, b"\0")
        + HexBytes(order["feeRecipientAddress"]).rjust(32, b"\0")
        + HexBytes(order["senderAddress"]).rjust(32, b"\0")
        + int(order["makerAssetAmount"]).to_bytes(32, byteorder="big")
        + int(order["takerAssetAmount"]).to_bytes(32, byteorder="big")
        + int(order["makerFee"]).to_bytes(32, byteorder="big")
        + int(order["takerFee"]).to_bytes(32, byteorder="big")
        + int(order["expirationTimeSeconds"]).to_bytes(32, byteorder="big")
        + int(order["salt"]).to_bytes(32, byteorder="big")
        + keccak(HexBytes(order["makerAssetData"]))
        + keccak(HexBytes(order["takerAssetData"]))
    )
    return "0x" + keccak(
        EIP191_HEADER
        + eip712_domain_struct_hash
        + eip712_order_struct_hash
    ).hex()
@classmethod
def from_json(
    cls,
    order_json,
    check_validity=False,
    include_signature=True,
):
    """Given a json representation of a signed order, return a SignedOrder object.

    Args:
        order_json (dict): a dict conforming to "/signedOrderSchema"
            or "/orderSchema" (depending on whether `include_signature`
            is set to True or False) schemas can be found
            `here <https://github.com/0xProject/0x-monorepo/tree/development/
            packages/json-schemas/schemas>`__
        check_validity (bool): whether we should do an explicit check
            to make sure the passed in dict adheres to the required
            schema (default: False)
        include_signature (bool): whether the object is expected to have
            the signature on it or not. This will affect whether
            "/signedOrderSchema" or "/orderSchema" is used for validation
            (default: True)
    """
    order = cls()
    if check_validity:
        if include_signature:
            assert_valid(order_json, "/signedOrderSchema")
        else:
            assert_valid(order_json, "/orderSchema")
    # Copy each field through the property setters so the values get
    # validated/normalized on the way in.
    order.maker_address = order_json["makerAddress"]
    order.taker_address = order_json["takerAddress"]
    order.maker_fee = order_json["makerFee"]
    order.taker_fee = order_json["takerFee"]
    order.sender_address = order_json["senderAddress"]
    order.maker_asset_amount = order_json["makerAssetAmount"]
    order.taker_asset_amount = order_json["takerAssetAmount"]
    order.maker_asset_data = order_json["makerAssetData"]
    order.taker_asset_data = order_json["takerAssetData"]
    order.salt = order_json["salt"]
    order.exchange_address = order_json["exchangeAddress"]
    order.fee_recipient_address = order_json["feeRecipientAddress"]
    order.expiration_time_seconds = order_json["expirationTimeSeconds"]
    if include_signature:
        order.signature = order_json["signature"]
    # Recompute derived fields (the order hash).
    order.update()
    return order
class ZxWeb3Client(Web3Client):
    """Client for interacting with 0x contracts."""

    __name__ = "ZxWeb3Client"

    def __init__(
        self,
        network_id: int,
        web3_rpc_url: str,
        private_key: Optional[Union[HexBytes, str]] = None,
    ):
        """Create an instance of the ZxWeb3Client.

        Args:
            network_id (int): id of network from :class:`NetworkId`
            web3_rpc_url (str): URL of the Web3 service
            private_key (:class:`HexBytes` or str): hex bytes or hexstr
                of private key for signing transactions (must be convertible
                to :class:`HexBytes`) (default: None)
        """
        super(ZxWeb3Client, self).__init__(
            network_id=network_id,
            web3_rpc_url=web3_rpc_url,
            private_key=private_key,
        )
        # NOTE(review): attribute name carries a typo ("addressess"); kept
        # as-is because it is used consistently within this class.
        self._contract_addressess = \
            ZX_CONTRACT_ADDRESSES[NetworkId(self._network_id)]
        # Lazily-instantiated Exchange contract handle (see `zx_exchange`).
        self._zx_exchange = None

    @property
    def exchange_address_checksumed(self):
        """Get checksummed address of the 0x Exchange contract."""
        return to_checksum_address(self._contract_addressess["exchange"])

    @property
    def zx_exchange(self):
        """Get an instance of the 0x Exchange contract (created on first use)."""
        if self._zx_exchange is None:
            self._zx_exchange = self.web3_eth.contract(
                address=self.exchange_address_checksumed,
                abi=abi_by_name("Exchange"))
        return self._zx_exchange

    def sign_hash_zx_compat(self, hash_hex: Union[HexBytes, str]) -> str:
        """Get a zx-compatible signature from signing a hash_hex with eth-sign.

        Args:
            hash_hex (:class:`HexBytes` or hashstr): hash to sign

        Returns:
            (str) 0x-compatible signature of hash
        """
        ec_signature = self.sign_hash(hash_hex)
        return self.get_zx_signature_from_ec_signature(ec_signature=ec_signature)

    @staticmethod
    def get_zx_signature_from_ec_signature(ec_signature) -> str:
        """Get a hexstr 0x-compatible signature from an eth-sign ec_signature.

        The 0x signature is a hexstr made from the concatenation of the
        hexstr of the "v", "r", and "s" parameters of an ec_signature,
        ending with the constant "03" to indicate "eth-sign" was used.
        The "r" and "s" parts each represent 32 bytes and so are
        right-justified with zero padding on the left (i.e. 64 hex
        characters each).

        NOTE(review): both `hex(...)` and `HexBytes(...).hex()` produce
        "0x"-prefixed strings, so the concatenated result embeds "0x"
        prefixes between components -- confirm downstream consumers expect
        exactly this format.

        Args:
            ec_signature (dict): A dict containing "r", "s" and "v"
                parameters of an elliptic curve signature as integers

        Returns:
            (str) 0x-compatible hash signature
        """
        v = hex(ec_signature["v"])  # pylint: disable=invalid-name
        r = HexBytes(ec_signature["r"]).rjust(32, b"\0").hex()  # pylint: disable=invalid-name
        s = HexBytes(ec_signature["s"]).rjust(32, b"\0").hex()  # pylint: disable=invalid-name
        # append "03" to specify signature type of eth-sign
        return v + r + s + "03"

    def cancel_zx_order(
        self,
        zx_signed_order: ZxSignedOrder,
        tx_params: TxParams = None,
    ) -> Union[HexBytes, bytes]:
        """Call the cancelOrder function of the 0x Exchange contract.

        Args:
            zx_signed_order (:class:`ZxSignedOrder`): order to cancel
            tx_params (:class:`TxParams`): transaction options (default:
                None)

        Returns:
            (:class:`HexBytes`) transaction hash
        """
        order = zx_signed_order.to_json(for_web3=True)
        func = self.zx_exchange.functions.cancelOrder(order)
        return self._invoke_function_call(
            func, tx_params=tx_params, view_only=False)

    def fill_zx_order(
        self,
        zx_signed_order: ZxSignedOrder,
        taker_fill_amount: int,
        base_unit_decimals: int = 18,
        tx_params: TxParams = None,
    ) -> Union[HexBytes, bytes]:
        """Call the fillOrder function of the 0x Exchange contract.

        Args:
            zx_signed_order (:class:`ZxSignedOrder`): order to fill
            taker_fill_amount (int): amount of taker asset (will be converted
                to base units by multiplying by 10**base_unit_decimals)
            base_unit_decimals (int): number of base unit decimals (default:
                18)
            tx_params (:class:`TxParams`): transaction options (default:
                None)

        Returns:
            (:class:`HexBytes`) transaction hash
        """
        signed_order = zx_signed_order.to_json(for_web3=True)
        signature = HexBytes(zx_signed_order.signature)
        # Scale the human-readable amount into contract base units.
        taker_fill_amount = int(taker_fill_amount * 10**base_unit_decimals)
        func = self.zx_exchange.functions.fillOrder(
            signed_order,
            taker_fill_amount,
            signature
        )
        return self._invoke_function_call(
            func, tx_params=tx_params, view_only=False)
| officialcryptomaster/pyveil | src/utils/zeroexutils.py | zeroexutils.py | py | 30,368 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "utils.web3utils.NetworkId.MAINNET",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "utils.web3utils.NetworkId",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "utils.web3utils.NetworkId.ROPSTEN",
"line_number": 33,
"usage_type": "at... |
38604120141 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 12 11:47:11 2021
@author: Souparno
"""
import torch
#from transformers import AutoTokenizer, AutoModelWithLMHead
from transformers import T5Tokenizer, T5ForConditionalGeneration
from methodology_bs4 import *
#from correlated_words_0 import *
from summarygenerator import *
# tokenizer = AutoTokenizer.from_pretrained('t5-base',local_files_only=True) ##local_files = False if downloading the model from hugging face
# model = AutoModelWithLMHead.from_pretrained('t5-base', return_dict=True,local_files_only=True) ##local_files = False if downloading the model from hugging face
# Load the T5 tokenizer and model from the local cache
# (set local_files_only=False to download from the Hugging Face hub instead).
tokenizer = T5Tokenizer.from_pretrained('t5-base',local_files_only=True)
model = T5ForConditionalGeneration.from_pretrained('t5-base', return_dict=True,local_files_only=True)
# Gather search parameters from the user.
search_query=input('enter the relevant key words: ')
no_articles_fetched=input('enter the no. of articles to be fetched: ')
# Fetch articles matching the query, then build an extractive summary
# (5 sentences) of their methodology sections.
filename,df,meth_ids=Methodology(search_query,no_articles_fetched)
methodologysummary=Summarizer(filename,5)
# T5 expects a task prefix; long inputs are truncated to the model limit.
inputs = tokenizer.encode("summarize: " + methodologysummary,
                          return_tensors='pt',
                          max_length=2048,
                          truncation=True)
# Beam search over 10 beams; decode only the best candidate below.
summary_ids = model.generate(inputs, max_length=200, min_length=30, length_penalty=10.,num_return_sequences=10,early_stopping=False, num_beams=10)
summary = tokenizer.decode(summary_ids[0])
print(summary)
#data=pd.read_csv(filename)
#keywordframe=keywordExtraction(filename,search_query)
# name=search_query+' data+keywords.csv'
# keywordframe=keywordframe[['PMCID', 'Title', 'Methodology', 'keywords','Inclusion Criteria','Exclusion Criteria']]
# keywordframe.to_csv(name)
| Chattopadhyay-Souparno/Medical-Writing-Automation | main_file.py | main_file.py | py | 1,785 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "transformers.T5Tokenizer.from_pretrained",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "transformers.T5Tokenizer",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "transformers.T5ForConditionalGeneration.from_pretrained",
"line_number": 21... |
40772316728 | # 1.导包
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
# 2. Create the browser driver object.
driver = webdriver.Chrome()
# 3. Open the registration test page (注册A.html) and perform the steps below.
driver.get(
    "file:///C:/Users/sandysong/Desktop/pagetest/%E6%B3%A8%E5%86%8CA.html")
# 4. Business operations.
# 1) Locate the username input with a CSS id selector and type: admin
driver.find_element(By.CSS_SELECTOR, "#userA").send_keys("admin")
time.sleep(2)
# 2) Locate the password input with a CSS attribute selector and type: 123456
driver.find_element(By.CSS_SELECTOR, "[placeholder='请输入密码']").send_keys('123456')
time.sleep(2)
# 3) Locate the phone-number input with a CSS class selector and type: 18600000000
driver.find_element(By.CSS_SELECTOR, ".telA").send_keys("18600000000")
time.sleep(2)
# 4) Locate the register button with a CSS element selector and click it.
driver.find_element(By.CSS_SELECTOR, "button").click()
# 5. Close the browser window after 3 seconds.
time.sleep(3)
driver.quit()
| 1769778682/day03 | test_09_css_前4种.py | test_09_css_前4种.py | py | 1,063 | python | zh | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number": 12,
"usage_type": "attribute"
... |
30632337598 | import mapclient.splash_rc
from PySide6 import QtCore, QtGui, QtWidgets
class SplashScreen(QtWidgets.QSplashScreen):
    """Application splash screen with an embedded progress bar."""

    def __init__(self):
        super(SplashScreen, self).__init__()
        pixmap = QtGui.QPixmap(":/mapclient/splash.png")
        self.setPixmap(pixmap)
        self._font = QtGui.QFont()
        self._font.setPixelSize(20)
        # The progress bar is never shown as a child widget; it is rendered
        # manually onto the splash in drawContents().
        self._progress_bar = QtWidgets.QProgressBar()
        self._progress_bar.setRange(0, 100)
        self._progress_bar.setGeometry(0, 0, pixmap.width() - 0, 8)
        # self.setStyleSheet("""
        # QProgressBar {
        #     border: 2px solid grey;
        #     border-radius: 5px;
        # }
        #
        # QProgressBar::chunk {
        #     background-color: #05B8CC;
        #     width: 20px;
        # } """)
        self._pixmap_height = pixmap.height()
        self.setFont(self._font)

    def drawContents(self, painter):
        # Paint the progress bar along the bottom edge of the splash image
        # (8px tall, anchored at pixmap_height - 8), then the base contents.
        self._progress_bar.render(painter, QtCore.QPoint(0, self._pixmap_height - 8))
        super(SplashScreen, self).drawContents(painter)

    def showMessage(self, message, progress=0):
        """Show *message* bottom-left and update the progress bar value."""
        self._progress_bar.setValue(progress)
        super(SplashScreen, self).showMessage(' ' + message, QtCore.Qt.AlignLeft | QtCore.Qt.AlignBottom)
| MusculoskeletalAtlasProject/mapclient | src/mapclient/splashscreen.py | splashscreen.py | py | 1,179 | python | en | code | 19 | github-code | 1 | [
{
"api_name": "PySide6.QtWidgets.QSplashScreen",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "PySide6.QtWidgets",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "PySide6.QtGui.QPixmap",
"line_number": 9,
"usage_type": "call"
},
{
"api_nam... |
74532493792 | import random
from art import logo
from art import vs
from game_data import data
import os
# make this as function
# format the account data into printable format
# name = random_data['name']
# follower_count = random_data['follower_count']
# description = random_data['description']
# country = random_data['country']
def format_data(account):
    """Build a one-line, human-readable summary of an account record."""
    return "{}, a {}, from {}".format(
        account['name'], account['description'], account['country'])
def follow_count(account):
    """Return the account's follower count."""
    return account['follower_count']
def check_answer(guess, count_a, count_b):
    """Return True when *guess* ('a' or 'b') names the account with more followers.

    Ties count as a win for 'b', matching the original comparison.
    """
    winner = 'a' if count_a > count_b else 'b'
    return guess == winner
# Display Art
print(logo)
score = 0
account_a = random.choice(data)
account_b = random.choice(data)
game_continue = True
while game_continue:
    # generate a random account: B from the previous round becomes A,
    # and a fresh, distinct B is drawn.
    account_a = account_b
    account_b = random.choice(data)
    while account_a == account_b:
        account_b = random.choice(data)
    print(f"Compare A: {format_data(account_a)}")
    print(vs)
    print(f"Against B: {format_data(account_b)}")
    follower_count_a = follow_count(account_a)
    follower_count_b = follow_count(account_b)
    guess = input("Who has more followers? Type 'A' or 'B': ").lower()
    # Re-print the logo to "clear" the previous round's output.
    print(logo)
    is_correct = check_answer(guess, follower_count_a, follower_count_b)
    if is_correct:
        score += 1
        game_continue = True
        print(f"You are right! Your current score is {score}")
    else:
        # A wrong guess ends the game.
        game_continue = False
        print(f"You are wrong. Your final score is {score}")
| yong197578/high-and-low-game | main.py | main.py | py | 1,769 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "art.logo",
"line_number": 39,
"usage_type": "argument"
},
{
"api_name": "random.choice",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "game_data.data",
"line_number": 41,
"usage_type": "argument"
},
{
"api_name": "random.choice",
"li... |
18332363530 | import json
from pathlib import Path
# Define the input and output paths
input_filepath = Path('./data/questions.json')
output_directory = Path('./data/qna')

# Expecting the questions.json with an array of { source, question, answer } pair tuples.
with open(input_filepath, 'r') as input_file:
    input_json = json.load(input_file)

current_source = None
current_file = None
lines = []
for input_tuple in input_json:
    src = input_tuple["source"]
    # A new source starts a new output file; flush the previous group first,
    # dropping the trailing "---" separator line.
    if current_source != src:
        if current_file:
            current_file.writelines(lines[:-1])
            current_file.flush()
            current_file.close()
        # Output name: <source basename without extension>_qna.md
        file_name_stub = src.split("/")[-1].split(".")[0]
        file_path = f"{output_directory}/{file_name_stub}_qna.md"
        current_file = open(file_path, 'w')
        current_source = src
        lines = []
        # with open(src, 'r') as src_file:
        #     src_lines = src_file.readlines()
        #     title = src_lines[0][1:].strip()
        #     lines = [f"# {title} Questions and Answers:\n", "\n"]
    # Collapse runs of blank lines inside the answer (three passes).
    answer = input_tuple['answer'].replace("\n\n", "\n").replace("\n\n", "\n").replace("\n\n", "\n")
    lines.extend([
        f"Question: {input_tuple['question']}\n",
        f"Answer: {answer}\n",
        "---\n"
    ])

# Write the final group, again dropping the trailing separator.
current_file.writelines(lines[:-1])
current_file.flush()
current_file.close()
| CsabaConsulting/Vectara | augment_prep.py | augment_prep.py | py | 1,458 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 10,
"usage_type": "call"
}
] |
30002086737 | import numpy as np
from torch import nn
from xavier.constants.type import Type
from xavier.core.transformation import get_standard
class Rnn(nn.Module):
    """LSTM-based classifier over 5x5 feature windows (3 output classes)."""

    # Identifier used elsewhere in the project to select this model type.
    NAME_TYPE = Type.rnn

    def __init__(self, device=None):
        super(Rnn, self).__init__()
        # Number of output classes.
        self.output_layer = 3
        self.device = device
        # 2-layer LSTM: 5 input features per step, hidden size 16,
        # batch-first tensors (batch, seq, feature).
        self.lstm = nn.LSTM(5, 16, 2, batch_first=True)
        # Classifier consumes the hidden states of all 5 time steps (16 * 5).
        self.classifier = nn.Linear(16 * 5, self.output_layer, bias=True)

    def forward(self, x):
        """Run sequences through the LSTM and classify the flattened outputs.

        NOTE(review): the classifier size implies a fixed sequence length
        of 5 (input shaped (batch, 5, 5)) -- confirm callers guarantee it.
        """
        x, _ = self.lstm(x)
        # Flatten all time-step hidden states into one feature vector.
        # NOTE(review): view(-1, 16, 5) then view(-1, 16 * 5) only matches
        # a plain flatten when seq length is 5 -- verify the intent.
        x = x.contiguous().view(-1, 16, 5)
        x = x.view(-1, 16 * 5)
        x = self.classifier(x)
        return x

    def convert_standard(self, feature):
        """Standardize one 25-value feature vector and reshape to (1, 5, 5).

        NOTE(review): relies on `self.standard` being assigned externally;
        it is never set in `__init__` -- confirm callers initialize it.
        """
        x_row = np.asarray(get_standard([feature], self.standard))[0]
        data_standard = np.asarray(x_row).reshape((5, 5))
        data_standard = np.array([data_standard])
        return data_standard
| fabriciotorquato/pyxaiver-v2 | xavier/net/rnn.py | rnn.py | py | 870 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "xavier.constants.type.Type.rnn",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "xavier... |
27841424849 | from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for, Flask
)
#from werkzeug.exceptions import abort
#from flask_login import current_user, login_user, logout_user, login_required
bp = Blueprint('home', __name__)
@bp.route('/')
@bp.route('/home')
def index():
    """Render the home page (served at both '/' and '/home')."""
    return render_template(
        'home.html',
    )
| snickr42/cookbook | cookr/home.py | home.py | py | 365 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 12,
"usage_type": "call"
}
] |
22290817065 | from collections import deque
class Solution:
    def solve(self, nums):
        """For each start index, find the length of the shortest jump chain
        (moving i -> i - nums[i] or i -> i + nums[i], staying in bounds)
        that lands on a value of the opposite parity; -1 when unreachable.

        Results for earlier start indices are memoised and reused as upper
        bounds while searching from later ones.
        """
        size = len(nums)
        unreached = 2 * size  # sentinel: no answer recorded for this index yet

        def shortest_from(start, memo):
            seen = {start}
            frontier = deque([(start, 0)])
            upper = unreached
            while frontier:
                node, dist = frontier.popleft()
                step = nums[node]
                # Candidate jumps, backward first, skipping visited/out-of-range.
                candidates = []
                for nxt in (node - step, node + step):
                    if 0 <= nxt < size and nxt not in seen:
                        candidates.append(nxt)
                for nxt in candidates:
                    if nums[nxt] % 2 != nums[start] % 2:
                        # Opposite parity reached one hop away.
                        return min(upper, dist) + 1
                    if memo[nxt] == unreached:
                        frontier.append((nxt, dist + 1))
                        seen.add(nxt)
                    elif memo[nxt] != -1:
                        # A previously-solved index bounds our answer.
                        upper = min(upper, memo[nxt] + dist)
            return upper + 1 if upper != unreached else -1

        answers = [-1] * size
        memo = [unreached] * size
        for start in range(size):
            result = shortest_from(start, memo)
            answers[start] = result
            memo[start] = result
        return answers
from termcolor import colored, cprint
solution = Solution()
# Small sanity-check case with its expected output.
found = solution.solve([5, 1, 2, 3, 4, 7, 4, 5])
print(found)
print(found == [-1, 1, 1, 1, 1, -1, 2, 1])
# Larger regression case with its expected output.
found = solution.solve([14, 29, 94, 63, 55, 29, 35, 31, 15, 6, 34, 52, 74, 31, 58, 92, 96, 80, 36, 99, 39, 74, 52, 50, 36, 81, 11, 23, 85, 58, 84, 54, 98, 3, 52, 67, 51, 20, 26, 76, 15, 53, 44, 43, 64, 63, 2, 23, 45, 76, 33, 8, 33, 3, 6, 57, 93, 96, 88, 29, 71, 92, 81, 6, 97, 48, 94, 17, 20, 74, 51, 20, 30, 54, 90, 88, 70, 76, 67, 92, 69, 20, 47, 37, 64, 27, 80, 22, 100, 23, 65, 65, 44, 43, 67, 58, 66, 95, 31, 10])
sol = [5, 1, -1, 1, 2, 1, 5, 1, 1, 1, -1, -1, 2, 1, 4, -1, -1, 1, 2, -1, 2, 4, -1, 2, 1, -1, 1, 2, -1, 4, -1, 1, -1, 1, 2, -1, 1, 2, 1, -1, -1, 4, 2, 1, -1, -1, 1, 1, 2, -1, 1, 1, 2, 2, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 2, -1, 1, 1, -1, -1, 1, 3, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 3, -1, 1, -1, 2, 1, 2, 3, 3, -1, 1, 2, 1]
solved = found == sol
print(solved)
# On a mismatch, print each position colour-coded: red for wrong entries
# (with the index), green for matching ones.
if not solved:
    for index in range(len(found)):
        if found[index] != sol[index]:
            cprint(f"{found[index]}: {index}", 'red', end = ' ')
        else:
            cprint(found[index], 'green', end=' ')
    print()
| dhrumilp15/Puzzles | binsearch/parity_jump_memo.py | parity_jump_memo.py | py | 2,701 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "termcolor.cprint",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "termcolor.cprint",
"line_number": 61,
"usage_type": "call"
}
] |
40372539590 | from bs4 import BeautifulSoup
with open("indexTwo.html", "r") as f:
document = BeautifulSoup(f, "html.parser")
tags = document.find_all('input', type="text")
for tag in tags:
tag['placeholder'] = "I Love to change things :)"
with open('change.html', 'w') as f:
f.write(str(document))
| Vselenis/Python-Advanced-April-2021 | Web Scraping Project/demo/partTwo.py | partTwo.py | py | 299 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 4,
"usage_type": "call"
}
] |
6420744938 | import math
import gym
from Acrobot_DQN import Acrobot_DQN
from Acrobot_Game import Acrobot_Game
env = gym.make("Acrobot-v1")
min_len4train = 100
max_len4train = 50_000
DISCOUNT = 0.90
min_batch = 64
Batch_Size = 32
SHOW_EVERY = 200
UPDATE_SECONDARY_WEIGHTS = False
UPDATE_SECONDARY_WEIGHTS_NUM = 4
EPISODES = 1000
epsilon = 1
epsilon_mul_value = math.log(0.01, 10)/(EPISODES * 0.8)
epsilon_mul_value = math.pow(10, epsilon_mul_value)
#main
Neuron = Acrobot_DQN(max_len4train, UPDATE_SECONDARY_WEIGHTS, min_batch, min_len4train, DISCOUNT, Batch_Size)
Acrobot_Game(Neuron, env, EPISODES, min_len4train, epsilon, epsilon_mul_value, SHOW_EVERY, UPDATE_SECONDARY_WEIGHTS_NUM)
#saving model and weights
Neuron.model.save_weights("weights")
Neuron.model.save("model.h5")
| Soester10/DRL-Gym-Env | Acrobot/main.py | main.py | py | 810 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gym.make",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "Acrobot_DQN.Acrobot_DQN",
"line_number"... |
41973668035 | from musikla.parser.printer import CodePrinter
from typing import Any, Optional, Tuple, List
from .statement_node import StatementNode
from ..node import Node
from musikla.core import Value, Context
class MultiVariableDeclarationStatementNode( StatementNode ):
    """AST node for declaring/assigning multiple variables at once.

    Evaluates the right-hand side once and distributes its items across the
    left-hand side targets, optionally applying a compound operator
    (``*=``, ``/=``, ``+=``, ``-=``, ``&=``, ``|=``).
    """

    def __init__ ( self, left : List[Node], right : Node, operator : Optional[str] = None, local : bool = False, position : Tuple[int, int, int] = None ):
        super().__init__( position )

        self.left : Any = left
        self.right : Any = right
        self.operator : Optional[str] = operator
        self.local : bool = local

        # Validate up-front that every target supports the required kind of
        # assignment, so errors surface at parse time rather than eval time.
        for op in self.left:
            if self.operator is None and not hasattr( op, 'assign' ):
                raise BaseException( f"Left hand side \"{CodePrinter().print(op)}\" cannot be used in an attribution" )
            elif self.operator is not None and not hasattr( op, 'lookup_assign' ):
                raise BaseException( f"Left hand side \"{CodePrinter().print(op)}\" cannot be used in an attribution" )

    def __eval__ ( self, context : Context ):
        # Evaluate the right-hand side once; it must be indexable so each
        # target can pick out its own item.
        val = Value.assignment( self.right.eval( context.fork( cursor = 0 ) ) )

        for i, op in enumerate( self.left ):
            if self.operator is None or self.operator == "":
                # Plain assignment of the i-th item.
                op.assign( context, val[ i ], local = self.local )
            else:
                # Compound assignment: read the current value, combine it
                # with the i-th item, and write it back. (`val` is only
                # read here, so no `nonlocal` declaration is needed.)
                def _set ( value ):
                    if self.operator == '*': value *= val[ i ]
                    elif self.operator == '/': value /= val[ i ]
                    elif self.operator == '+': value += val[ i ]
                    elif self.operator == '-': value -= val[ i ]
                    elif self.operator == '&': value &= val[ i ]
                    elif self.operator == '|': value |= val[ i ]
                    else: raise Exception( "Invalid operator: " + self.operator )

                    return value

                op.lookup_assign( context, _set, local = self.local )

        return None

    def to_source ( self, printer : CodePrinter ):
        # BUG FIX: the original iterated `self.to_source` (the bound method
        # itself, a TypeError at runtime); the targets live in `self.left`.
        for i, op in enumerate( self.left ):
            if i > 0:
                printer.add_token( ", " )

            op.to_source( printer )

        printer.add_token( f" { self.operator if self.operator is not None else '' }= " )

        self.right.to_source( printer )
| pedromsilvapt/miei-dissertation | code/musikla/musikla/parser/abstract_syntax_tree/statements/multi_var_declaration_node.py | multi_var_declaration_node.py | py | 2,354 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "statement_node.StatementNode",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "node.Node",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
... |
11747480348 |
from . import blueprint
from .settings import SettingClass
from .forms import SettingsForm
from flask import request, current_app
from .error import FormNotFound
@blueprint.get('/')
def ping():
    """Health-check endpoint; responds with a constant string."""
    return 'ping'
@blueprint.post('/s/<setting_key>/set')
def set_value(setting_key):
    """Apply the posted JSON properties to the setting group *setting_key*.

    Persists the group immediately when FLSETT_AUTO_SAVE_SETTINGS is enabled.
    """
    setting: SettingClass = SettingClass.get_group(setting_key)
    if not setting:
        # NOTE(review): returns 'Failed' with HTTP 200 -- a 404 may be clearer.
        return 'Failed'
    setting.set_properties(request.json)
    if current_app.config.get('FLSETT_AUTO_SAVE_SETTINGS'):
        setting.save()
    return 'Success'
@blueprint.post('/f/<form_key>')
def setting_form_enpoint(form_key):
    """Dispatch posted JSON to the SettingsForm subclass matching *form_key*.

    NOTE(review): the name keeps the original "enpoint" spelling so the
    registered Flask endpoint name does not change.
    """
    for subc in SettingsForm.__subclasses__():
        if subc.get_key() == form_key:
            r = subc.on_data(request.json)
            if r is True:
                return 'Success'
            if r is None:
                return 'Unknow State'
            # Any other truthy/falsy value is passed straight through as the
            # response body.
            return r
    raise FormNotFound(form=form_key)
| LordBex/flask-settings | flasky_settings/main.py | main.py | py | 920 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "settings.SettingClass",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "settings.SettingClass.get_group",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 18,
"usage_type": "attribute"
},
{
"api... |
22865648549 | import pytest
from src.app import create_app, DB
from src.app import create_app
from src.app.routes import routes
from flask import json
from sqlalchemy import event
# Default JSON request headers shared by the test helpers below.
mimetype = 'application/json'
headers = {
    'Content-Type': mimetype,
    'Accept': mimetype
}
@pytest.fixture(scope="session")
def app():
# Estamos testando no ambiente de teste
app_on = create_app('development')
routes(app_on)
return app_on
@pytest.fixture
def logged_in_client(client):
    """Log in a known user and return the auth token from the response."""
    data = {
        "email": "luislopes@gmail.com",
        "password": "123Mudar!"
    }
    response = client.post("user/login", data=json.dumps(data), headers=headers)
    return response.json["token"]
# Function-scoped and autouse: every test automatically runs inside this fixture.
@pytest.fixture(scope="function", autouse=True)
def session(app):
    """Wrap each test in a DB transaction that is rolled back afterwards."""
    with app.app_context():
        connection = DB.engine.connect()  # dedicated connection for this test
        transaction = connection.begin()  # outer transaction, rolled back at teardown
        options = dict(bind=connection, binds={})  # bind the session to it
        sess = DB.create_scoped_session(options=options)

        sess.begin_nested()

        # Restart the SAVEPOINT whenever the inner transaction ends, so test
        # code can commit freely without escaping the outer transaction.
        @event.listens_for(sess(), 'after_transaction_end')
        def restart_savepoint(sess2, trans):
            if trans.nested and not trans._parent.nested:
                sess2.expire_all()
                sess2.begin_nested()

        DB.session = sess

        # Hand control to the test; cleanup runs after it finishes.
        yield sess

        sess.remove()
        transaction.rollback()
connection.close() | juliasilvamoura/DEVinHousa-conectaNuvem | Modulo3/Modulo3-Flask/tests/conftest.py | conftest.py | py | 1,713 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "src.app.create_app",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "src.app.routes.routes",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.json.du... |
29845106401 | from fastapi import APIRouter, Form, UploadFile, File
from router.chart.upload_s3 import upload_s3
from database.db import save_chart_info
from PIL import Image
from pydub import AudioSegment
import io
import os
import random, string
import datetime
import hashlib
import json
import requests
# Load service configuration (converter endpoint, S3 URL) once at import time.
with open("config.json", encoding="utf-8") as f:
    config = json.load(f)
router = APIRouter()
@router.post("/up_chart")
async def upload_chart(
    title: str = Form(...),
    composer: str = Form(...),
    chart_author: str = Form(...),
    difficulty: int = Form(...),
    description: str = Form(...),
    bgm_file: UploadFile = File(...),
    jacket_file: UploadFile = File(...),
    chart_file: UploadFile = File(...),
):
    """Accept a chart upload: convert the assets, push them to S3 and record
    the chart metadata (URLs + SHA-1 hashes) in the database.
    """
    # Pick the chart id: 5 characters drawn from 0-9 and a-f.
    characters = string.digits + "abcdef"
    chart_id = ''.join(random.choice(characters) for _ in range(5))
    # Read the uploaded files into memory.
    bgm = await bgm_file.read()
    jacket = await jacket_file.read()
    chart = await chart_file.read()
    # Conversions
    # BGM: re-encode to 128 kbps MP3.
    audio = AudioSegment.from_file(io.BytesIO(bgm))
    bgm = io.BytesIO()
    audio.export(bgm, format="mp3", bitrate="128k")
    bgm.seek(0)
    # Jacket: force RGB JPEG at 500x500.
    img = Image.open(io.BytesIO(jacket))
    img = img.convert('RGB').resize((500, 500))
    jacket = io.BytesIO()
    img.save(jacket, format='JPEG')
    jacket.seek(0)
    # Chart: delegate conversion to the external converter service.
    url = config["convert_url"]
    response = requests.post(url, params={"chart_id": chart_id}, data=chart)
    chart = response.content
    # Generate the background image: jacket pasted onto the template,
    # then the in-game overlay composited on top.
    background_path = os.path.join("router", "chart", "bg", "background.png")
    ingame_img_path = os.path.join("router", "chart", "bg", "ingame_bg.png")
    background_image = Image.open(background_path)
    ingame_image = Image.open(ingame_img_path)
    jacket_image = Image.open(jacket).resize((450,450))
    background_image.paste(jacket_image, (730,168))
    background_image.paste(ingame_image, (0,0), ingame_image)
    background = io.BytesIO()
    background_image.save(background, format="PNG")
    background.seek(0)
    jacket.seek(0)
    # Compute SHA-1 hashes of the final asset bytes.
    bgm = bgm.read()
    jacket = jacket.read()
    background = background.read()
    bgm_hash = hashlib.sha1(bytes(bgm)).hexdigest()
    jacket_hash = hashlib.sha1(bytes(jacket)).hexdigest()
    chart_hash = hashlib.sha1(bytes(chart)).hexdigest()
    background_hash = hashlib.sha1(bytes(background)).hexdigest()
    # Upload every asset to S3 under the chart id.
    upload_s3(content=bgm, path=f"ymfan/{chart_id}/bgm.mp3")
    upload_s3(content=jacket, path=f"ymfan/{chart_id}/jacket.jpg")
    upload_s3(content=chart, path=f"ymfan/{chart_id}/chart.gz")
    upload_s3(content=background, path=f"ymfan/{chart_id}/background.png")
    # Save the song metadata in the database (timestamps in JST, UTC+9).
    save_chart_info(
        chart_id = chart_id,
        title = title,
        composer = composer,
        chart_author = chart_author,
        difficulty = difficulty,
        description = description,
        bgm_url = f"{config['S3_url']}/ymfan/{chart_id}/bgm.mp3",
        bgm_hash = bgm_hash,
        jacket_url = f"{config['S3_url']}/ymfan/{chart_id}/jacket.jpg",
        jacket_hash = jacket_hash,
        chart_url = f"{config['S3_url']}/ymfan/{chart_id}/chart.gz",
        chart_hash = chart_hash,
        background_url = f"{config['S3_url']}/ymfan/{chart_id}/background.png",
        background_hash = background_hash,
        post_at = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=9))).strftime('%Y-%m-%d %H:%M:%S'),
        update_at = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=9))).strftime('%Y-%m-%d %H:%M:%S')
    )
{
"api_name": "json.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "router.chart.upload_s3",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "fastapi.APIRouter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "fastapi.UploadFile... |
19015658590 | from typing import Any
import openai
import os
# Fail fast at import time when the API key is not configured.
OPEN_AI_KEY: str | None = os.environ.get("OPENAI_API_KEY")
if not OPEN_AI_KEY:
    raise ValueError("Missing OPENAI_API_KEY env variable")
openai.api_key = OPEN_AI_KEY
def get_chat_completion(
    user_message: str, model="gpt-3.5-turbo", max_tokens=500, temperature=0.7
) -> str:
    """Send a single user message to an OpenAI chat model and return its reply.

    Args:
        user_message: Text submitted as the sole user turn.
        model: Name of the chat model to query.
        max_tokens: Upper bound on tokens generated in the reply.
        temperature: Sampling temperature; lower values are more deterministic.

    Returns:
        The first choice's message content with newlines collapsed to spaces.
    """
    response: Any = openai.ChatCompletion.create(
        messages=[{"role": "user", "content": user_message}],
        model=model,
        max_tokens=max_tokens,
        temperature=temperature,
    )
    top_choice: dict = response["choices"][0]
    content: str = top_choice["message"]["content"]
    return content.replace("\n", " ").strip()
| jakecyr/sms-gpt | sms_gpt/open_ai_client.py | open_ai_client.py | py | 1,084 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.environ.get",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "openai.api_key",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "typing.Any",
"li... |
20448987511 | import json
from random import Random
from config_data import seed, armies_config
from strategy import choose_squad
from army import Army
# Module-wide RNG seeded from config so battle outcomes are reproducible.
R = Random(seed)
class Battlefield:
    """Runs the battle simulation between the configured armies and reports it.

    log_type selects the report sink: 1 = console, 2 = JSON file (requires `file`).
    """
    def __init__(self, log_type, file=None):
        # Build one Army per entry of the armies_config list.
        self.armies = [Army(army["id"], army["chosen_strategy"])
                       for army in armies_config]
        self.battles_counter = 0
        self.report = {
            "introduction": "",
            "battles": [],
            "conclusion": ""
        }
        self.log_type = log_type
        self.file = file
    def start(self):
        """Fight random battles until a single army remains, then emit the report."""
        self.report["introduction"] = \
            f"There are {len(self.armies)} armies in the battle: " + \
            f"{', '.join([army.id for army in self.armies])}"
        while len(self.armies) > 1:
            # Pick a random attacker, then a distinct defender.
            att_army = R.choice(self.armies)
            def_army = R.choice([army
                                 for army in self.armies
                                 if army is not att_army])
            att_sq = R.choice(att_army.squads)
            # The defending squad is selected according to the attacker's strategy.
            def_sq = choose_squad(att_army.strategy, def_army)
            dp = att_sq.damage
            # A hit lands only when the attacker out-rolls the defender and deals damage.
            if att_sq.attack_success >= def_sq.attack_success and dp:
                self.battles_counter += 1
                battle_report = {"Battle": self.battles_counter}
                battle_report["Attacking"] = \
                    f"Squad #{att_sq.id} from army {att_army.id}. " \
                    f"Contains {len(att_sq.units)} units"
                battle_report["Defending"] = \
                    f"Squad #{def_sq.id} from army {def_army.id}. " \
                    f"Contains {len(def_sq.units)} units"
                def_sq.get_damage(dp)
                battle_report["Outcome"] = \
                    f"Defending squad get {dp:.2f} damage points"
                def_army.left_active_squads()
                att_sq.level_up()
                # Drop armies that became inactive after this battle.
                before = len(self.armies)
                self.armies = [army
                               for army in self.armies
                               if army.is_active]
                after = len(self.armies)
                # Non-zero difference (truthy) means an army was eliminated.
                if (after - before):
                    battle_report["Losses"] = \
                        f"Army {def_army.id} is not active anymore"
                self.report["battles"].append(battle_report)
        self.report["conclusion"] = \
            f"It takes {self.battles_counter} battles. " \
            f"Winner: {self.armies[0].id}"
        self.to_print()
    def to_print(self):
        # Route the finished report to the configured sink.
        if self.log_type == 1:
            return self.console_log()
        elif self.log_type == 2:
            return self.file_log()
    def console_log(self):
        # Human-readable dump of the whole report to stdout.
        print(self.report["introduction"])
        for battle_report in self.report["battles"]:
            for p in battle_report:
                print(f"{p}: {battle_report[p]}")
            print("\n")
        print(self.report["conclusion"])
    def file_log(self):
        # Persist the report as pretty-printed JSON.
        if not self.file:
            raise Exception("File for logging is not found.")
        with open(self.file, "w") as log_file:
            json.dump(self.report, log_file, indent=4)
            # NOTE(review): close() is redundant inside the with block.
            log_file.close()
| QueVege/Task-3 | battlefield.py | battlefield.py | py | 3,225 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.Random",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "config_data.seed",
"line_number": 7,
"usage_type": "argument"
},
{
"api_name": "army.Army",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "config_data.armies_config"... |
38657153583 | import sys
from datetime import datetime
from typing import Collection, Dict, Tuple, List, Optional
if sys.version_info >= (3, 8):
from typing import TypedDict
else:
from typing_extensions import TypedDict
# Hard cap on the number of explicit data-row ids per export-v2 request.
MAX_DATA_ROW_IDS_PER_EXPORT_V2 = 2_000
class SharedExportFilters(TypedDict):
    """Filters shared by project and dataset export-v2 requests."""
    label_created_at: Optional[Tuple[str, str]]
    """ Date range for labels created at
    Formatted "YYYY-MM-DD" or "YYYY-MM-DD hh:mm:ss"
    An endpoint may be None for a half-open range.
    Examples: 
    >>> ["2000-01-01 00:00:00", "2050-01-01 00:00:00"]
    >>> [None, "2050-01-01 00:00:00"]
    >>> ["2000-01-01 00:00:00", None]
    """
    last_activity_at: Optional[Tuple[str, str]]
    """ Date range for last activity at
    Formatted "YYYY-MM-DD" or "YYYY-MM-DD hh:mm:ss"
    An endpoint may be None for a half-open range.
    Examples: 
    >>> ["2000-01-01 00:00:00", "2050-01-01 00:00:00"]
    >>> [None, "2050-01-01 00:00:00"]
    >>> ["2000-01-01 00:00:00", None]
    """
    data_row_ids: Optional[List[str]]
    """ Datarow ids to export
    Only allows MAX_DATA_ROW_IDS_PER_EXPORT_V2 datarows
    Example:
    >>> ["clgo3lyax0000veeezdbu3ws4", "clgo3lzjl0001veeer6y6z8zp", ...]
    """
class ProjectExportFilters(SharedExportFilters):
    """Filters accepted by project export-v2 requests (see SharedExportFilters)."""
    pass
class DatasetExportFilters(SharedExportFilters):
    """Filters accepted by dataset export-v2 requests (see SharedExportFilters)."""
    pass
def validate_datetime(string_date: str) -> bool:
    """Validate that a date string is "YYYY-MM-DD" or "YYYY-MM-DD hh:mm:ss".

    Returns True when the string parses (or is falsy); raises ValueError otherwise.
    """
    if not string_date:
        # Falsy values (None, "") are accepted as "no date supplied".
        return True
    for fmt in ("%Y-%m-%d", "%Y-%m-%d %H:%M:%S"):
        try:
            datetime.strptime(string_date, fmt)
        except ValueError:
            continue
        return True
    raise ValueError(f"""Incorrect format for: {string_date}.
        Format must be \"YYYY-MM-DD\" or \"YYYY-MM-DD hh:mm:ss\"""")
def build_filters(client, filters):
    """Translate an export-filters dict into the search-query list the API expects.

    Args:
        client: API client; used only to look up the user's timezone when a
            date filter is present.
        filters: Mapping with optional keys "last_activity_at" and
            "label_created_at" (each a (start, end) pair of date strings,
            either end may be None) and "data_row_ids" (list of ids).

    Returns:
        A list of search-query dicts, one per active filter.

    Raises:
        ValueError: On a malformed date, a non-list "data_row_ids", or more
            than MAX_DATA_ROW_IDS_PER_EXPORT_V2 ids.
    """
    search_query: List[Dict[str, Collection[str]]] = []
    timezone: Optional[str] = None

    def _get_timezone() -> str:
        # Resolve the requesting user's timezone, defaulting to UTC.
        timezone_query_str = """query CurrentUserPyApi { user { timezone } }"""
        tz_res = client.execute(timezone_query_str)
        return tz_res["user"]["timezone"] or "UTC"

    def _date_range_value(start, end, extra):
        # Build the "value" payload for a (possibly half-open) date range.
        # `extra` carries fields (e.g. timezone) merged into the payload.
        # Returns None when both endpoints are missing.
        if start is not None and end is not None:
            validate_datetime(start)
            validate_datetime(end)
            return {"operator": "BETWEEN", **extra,
                    "value": {"min": start, "max": end}}
        if start is not None:
            validate_datetime(start)
            return {"operator": "GREATER_THAN_OR_EQUAL", **extra, "value": start}
        if end is not None:
            validate_datetime(end)
            return {"operator": "LESS_THAN_OR_EQUAL", **extra, "value": end}
        return None

    last_activity_at = filters.get("last_activity_at")
    if last_activity_at:
        if timezone is None:
            timezone = _get_timezone()
        start, end = last_activity_at
        # last_activity_at queries carry the user's timezone in the payload.
        value = _date_range_value(start, end, {"timezone": timezone})
        if value is not None:
            search_query.append({"type": "data_row_last_activity_at",
                                 "value": value})

    label_created_at = filters.get("label_created_at")
    if label_created_at:
        if timezone is None:
            timezone = _get_timezone()
        start, end = label_created_at
        # NOTE(review): as in the original code, the timezone is fetched here
        # but not embedded in the labeled_at payload — confirm this is intended.
        value = _date_range_value(start, end, {})
        if value is not None:
            search_query.append({"type": "labeled_at", "value": value})

    data_row_ids = filters.get("data_row_ids")
    if data_row_ids:
        if not isinstance(data_row_ids, list):
            raise ValueError("`data_row_ids` filter expects a list.")
        if len(data_row_ids) > MAX_DATA_ROW_IDS_PER_EXPORT_V2:
            raise ValueError(
                f"`data_row_ids` filter only supports a max of {MAX_DATA_ROW_IDS_PER_EXPORT_V2} items."
            )
        search_query.append({
            "ids": data_row_ids,
            "operator": "is",
            "type": "data_row_id"
        })

    return search_query
{
"api_name": "sys.version_info",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "typing_extensions.TypedDict",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typi... |
11111319634 | from apikeys import key_geocoder
import requests
import json
def get_ll_by_name(name, pt=False):
    """Geocode a place name via the Yandex API and return "lon,lat" as a string."""
    # Strip all whitespace before embedding the name in the request URL.
    compact_name = ''.join(name.split())
    url = f"http://geocode-maps.yandex.ru/1.x/?apikey={key_geocoder}&geocode={compact_name}&size=650,450&format=json"
    reply = requests.get(url)
    geo_object = reply.json()['response']["GeoObjectCollection"]["featureMember"][0]["GeoObject"]
    # The "pos" field is "lon lat"; rejoin with a comma for map URLs.
    return ','.join(geo_object['Point']['pos'].split())
# Shifts the map centre depending on the requested direction.
def move_map(direction, ll, spn_x, spn_y):
    """Move the "lon,lat" centre by one span in the given direction.

    Vertical moves are clamped to [-90, 90]; horizontal moves to [-180, 180].
    Returns the new "lon,lat" string, or the input unchanged when ll is None.
    """
    if ll is None:
        return ll
    coords = [float(part) for part in ll.split(',')]
    if direction in ('вверх', 'вниз'):
        if direction == 'вверх' and coords[1] + spn_y <= 90:
            coords[1] = coords[1] + spn_y
        elif direction == 'вниз' and coords[1] - spn_y >= -90:
            coords[1] -= spn_y
    else:
        # NOTE: mirrors the original logic — any non-'вправо' direction (and a
        # 'вправо' blocked at the 180° edge) falls through to a leftward move.
        if direction == 'вправо' and coords[0] + spn_x <= 180:
            coords[0] += spn_x
        elif coords[0] - spn_x >= -180:
            coords[0] -= spn_x
    return ','.join([str(c) for c in coords])
def range_geoobject(geoobject):
    """Return the first readable address from a geocoder featureMember list.

    Args:
        geoobject: iterable of geocoder "featureMember" entries (dicts).

    Returns:
        The first entry's "GeocoderMetaData" text, or a placeholder string
        when no entry carries one.
    """
    for member in geoobject:
        try:
            # Entries without the expected nesting raise KeyError/TypeError.
            return member["GeoObject"]["metaDataProperty"] \
                ["GeocoderMetaData"]["text"]
        except (KeyError, TypeError):
            continue
    return '---адрес не был получен---'
# Reverse-geocodes coordinates into an address using range_geoobject above.
def get_address(ll):
    """Return the human-readable address for "lon,lat" coordinates."""
    url = f"http://geocode-maps.yandex.ru/1.x/?apikey={key_geocoder}&geocode={ll}&format=json"
    reply = requests.get(url)
    # Dump the raw reply for debugging.
    with open('response.json', 'w') as jsonfile:
        json.dump(reply.json(), jsonfile, ensure_ascii=False)
    members = reply.json()['response']["GeoObjectCollection"]["featureMember"]
    return range_geoobject(members)
def get_postcode_range(toponym):
    """Extract a postal code from a list of geocoder toponyms.

    Scans every entry and keeps the postal code of the last one that has it
    (preserving the original scan-all behaviour); returns '' when none found.
    """
    postcode = None
    for entry in toponym:
        try:
            postcode = entry['GeoObject']['metaDataProperty']['GeocoderMetaData']['Address'][
                'postal_code']
        except (KeyError, TypeError):
            # Entry lacks the expected nesting; keep scanning.
            continue
    return '' if postcode is None else postcode
def get_postcode_low(points):
    """Best-effort lookup of the postal code for the first point in `points`."""
    try:
        lon_lat = ','.join(points[0].split(',')[:2])
        url = f"http://geocode-maps.yandex.ru/1.x/?apikey={key_geocoder}&geocode={lon_lat}&format=json"
        reply = requests.get(url)
        # Dump the raw reply for debugging.
        with open('response.json', 'w') as jsonfile:
            json.dump(reply.json(), jsonfile, ensure_ascii=False)
        members = reply.json()["response"]["GeoObjectCollection"]["featureMember"]
        return get_postcode_range(members)
    except Exception:
        # Deliberately best-effort: any failure yields an empty postcode.
        return ''
def get_ll_by_click(ll, spn_x, spn_y, coords):
    """Convert a pixel click (x, y) on a 650x450 map image into "lon,lat"."""
    x, y = coords
    centre = [float(part) for part in ll.split(',')]
    # Top-left corner of the visible map area in geo coordinates.
    left = centre[0] - spn_x * 1.5
    top = centre[1] + spn_y * 3 / 4
    lon = (x / 650) * 3 * spn_x + left
    # Empirical vertical offset depending on the click band.
    if 375 > y > 225:
        offset = 50
    elif y > 225:
        offset = 80
    else:
        offset = 20
    lat = top - ((y + offset) / 450) * 4 * spn_y / 3
    return f"{lon},{lat}"
| ecol-master/Hackaton_AI | yandex_map/maps/geocoder.py | geocoder.py | py | 3,846 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "apikeys.key_geocoder",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "apikeys.key_geocoder",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "requests.get",
... |
43649070052 | import matplotlib.pyplot as plt
import numpy as np
from scipy import misc
from math import sqrt
#--------------------------------------------------
# code0.py : Première définitions
#--------------------------------------------------
def u(x):
    """Test function u(x) = x**4 (works elementwise on numpy arrays)."""
    return x ** 4
def grad1(f, x0, dx = 1.0):
    """1-D gradient of f at x0 by the central difference (f(x0+dx) - f(x0-dx)) / (2 dx).

    Numerically identical to scipy.misc.derivative(f, x0, dx, n=1) with the
    default 3-point stencil; written out explicitly because scipy.misc was
    deprecated in SciPy 1.10 and removed in 1.12.
    """
    return (f(x0 + dx) - f(x0 - dx)) / (2.0 * dx)
def laplac1(f, x0, dx = 1.0):
    """1-D Laplacian of f at x0 by the second central difference
    (f(x0+dx) - 2 f(x0) + f(x0-dx)) / dx**2.

    Numerically identical to scipy.misc.derivative(f, x0, dx, n=2) with the
    default 3-point stencil; written out explicitly because scipy.misc was
    deprecated in SciPy 1.10 and removed in 1.12.
    """
    return (f(x0 + dx) - 2.0 * f(x0) + f(x0 - dx)) / dx ** 2
# On remarque que dans notre cas
# Le gradient est la dérivée
# Le Laplacien est la dérivée seconde
#--------------------------------------------------
# code1.py : Graphes
#--------------------------------------------------
# Figure 1: plot u over [0, 5] on a 100-point grid.
fig1 = plt.figure(figsize=(9, 6))
N = 100
x = np.linspace(0, 5, N)
y = u(x)
plt.plot(x,y, label = "$u(x)$")
plt.title(" $ u(x) $ sur [0,5] ", fontsize = 17)
plt.xlabel('x', fontsize = 15)
plt.ylabel('u', fontsize = 15)
plt.legend()
plt.grid(True)
plt.show()
#--------------------------------------------------
# code2.py : Graphes
#--------------------------------------------------
# Figure 2: u together with its numerical gradient and Laplacian,
# evaluated pointwise with grad1/laplac1.
fig2 = plt.figure(figsize = (9, 6))
gradu = np.zeros(N)
laplacu = np.zeros(N)
x = np.linspace(0, 5, N)
for i in range(N):
    laplacu[i] = laplac1(u, x[i])
    gradu[i] = grad1(u, x[i])
plt.plot(x, y, label = 'u(x)')
plt.plot(x, laplacu, label = '$ \Delta u(x) $')
plt.plot(x, gradu, label = '$\nabla u(x)$')
plt.title("$u$ son Gradient et Laplacien ", fontsize = 17)
plt.xlabel('x', fontsize = 15)
plt.legend()
plt.grid(True)
plt.show()
#--------------------------------------------------
# code3.py : Définitions
#--------------------------------------------------
def diff_finie1(u, h, a, b):
    """First derivative of u on [a, b) by forward finite differences.

    Returns an array of N-2 values, one per interior grid point
    (the two endpoints are dropped).
    """
    grid = np.arange(a, b, h)
    n_pts = np.size(grid)
    deriv = np.zeros(n_pts - 2)
    for idx in range(1, n_pts - 1):
        deriv[idx - 1] = (u(grid[idx + 1]) - u(grid[idx])) / h
    return deriv
def diff_finie2(u, h, a, b):
    """Second derivative of u on [a, b) by central finite differences.

    Returns an array of N-2 values, one per interior grid point.
    """
    grid = np.arange(a, b, h)
    n_pts = np.size(grid)
    second = np.zeros(n_pts - 2)
    for idx in range(1, n_pts - 1):
        second[idx - 1] = (u(grid[idx + 1]) - 2 * u(grid[idx]) + u(grid[idx - 1])) / (h ** 2)
    return second
#--------------------------------------------------
# code4.py : Graphes
#--------------------------------------------------
# Figure 3: u and its first/second derivatives from the finite-difference helpers.
fig3 = plt.figure(figsize = (9, 6))
N = 100
x = np.linspace(0, 5, N)
y = u(x)
u_x = diff_finie1(u, 5/N, 0, 5)
u_xx = diff_finie2(u, 5/N, 0, 5)
plt.plot(x,y, label = 'u(x)')
# Derivatives exist only at interior points, hence the x[1:N-1] slice.
plt. plot(x[1:N-1], u_x, label = "u'(x)")
plt.plot(x[1:N-1], u_xx, label = "u''(x)")
plt.legend()
plt.grid(True)
plt.xlabel('x', fontsize = 15)
plt.title("Visualisation par différences finies", fontsize = 20)
plt.show()
#--------------------------------------------------
# code5.py : Défintions
#--------------------------------------------------
def du(x):
    """Exact first derivative of u: u'(x) = 4 x**3."""
    return 4 * x ** 3
def d2u(x):
    """Exact second derivative of u: u''(x) = 12 x**2."""
    return 12 * x ** 2
#--------------------------------------------------
# code6.py : Code erreur et approximation
#--------------------------------------------------
# Convergence study: discrete L2 error of the numerical gradient/Laplacian
# versus the exact derivatives, for grid steps h = 1/(n-1), n = 3..100.
erreur_Laplacien = np.zeros(99)
erreur_gradient = np.zeros(99)
h = np.zeros(99)
for n in range(3,101):
    h[n-3] = 1/(n-1)
    x = np.arange(0, 5, 1/(n-1))
    k = np.size(x)
    grad = du(x[1:k-1])
    lapla = d2u(x[1:k-1])
    gradh = diff_finie1(u, 1/(n-1), 0, 5)
    laplah = diff_finie2(u, 1/(n-1), 0, 5)
    erreur_Laplacien[n-3] = sqrt(sum((lapla - laplah)**2)) # discrete 2-norm of the error
    erreur_gradient[n-3] = sqrt(sum((grad - gradh)**2)) # discrete 2-norm of the error
# Log-log error plots with reference slopes h^(3/2) and h^(1/2).
fig4 = plt.figure(figsize = (9, 6))
plt.plot(h, erreur_Laplacien, label = 'erreur laplacien')
plt.plot(h,h**(3/2), label = " $h^{3/2}$")
plt.legend()
plt.grid(True)
plt.xlabel('pas h', fontsize = 15)
plt.xscale('log')
plt.yscale('log')
plt.title("Erreur du Laplacien numérique (échelle logarithmique)", fontsize = 20)
fig5 = plt.figure(figsize = (9, 6))
plt.plot(h, erreur_gradient, label = "erreur gradient")
plt.plot(h,h**(1/2), label = "h")
plt.legend()
plt.xlabel('pas h', fontsize = 15)
plt.grid(True)
plt.xscale('log')
plt.yscale('log')
plt.title("Erreur du gradient numérique (échelle logarithmique)", fontsize = 20)
plt.show()
#--------------------------------------------------
# code8.py : Amélioriation du derivatives1d.py
#--------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
# NUMERICAL PARAMETERS
#Number of grid points
L=2*3.141592 #2*math.pi
# Error accumulators for every grid size NX = 3..200 (hence 199 entries).
err_Tx = np.zeros(199)
err_Txx = np.zeros(199)
pas = np.zeros(199)
for NX in range(3, 201):
    # Initialisation
    dx = L/(NX-1) #Grid step (space)
    pas[NX-3] = dx
    x = np.linspace(0.0,L,NX)
    T = np.sin(x)
    # Exact derivatives of sin for the error measurement.
    Txe = np.cos(x)
    Txxe = -np.sin(x)
    Tx = np.zeros((NX))
    Txx = np.zeros((NX))
    #discretization of the second order derivative (Laplacian)
    for j in range (1, NX-1):
        Tx[j] = (T[j+1]-T[j-1])/(dx*2)
        Txx[j] = (T[j-1]-2*T[j]+T[j+1])/(dx**2)
    #Tx and Txx on boundaries
    # use extrapolation in order to have (T,x),xx=0
    #(T,x),xx= (Tx0 -2*Tx1+Tx2) =0
    Tx[0] = 2*Tx[1]-Tx[2]
    #use lower order formula (1st order)
    Tx[NX-1] = (T[NX-2]-T[NX-3])/dx
    Txx[0] = 2*Txx[1]-Txx[2]
    Txx[NX-1] = 2*Txx[NX-2]-Txx[NX-3]
    # Accumulate the L1 error for this grid size.
    err_Tx[NX-3] = np.sum(abs(Tx-Txe))
    err_Txx[NX-3] = np.sum(abs(Txx-Txxe))
# Figures 1-3: function, first derivative and second derivative (NX = 200),
# each with its log10 error curve.
plt.figure(1)
plt.plot(x,T, label = "graphe de sinus")
plt.title(u'Function sinus')
plt.xlabel(u'$x$', fontsize=20)
plt.ylabel(u'$T$', fontsize=26, rotation=90)
plt.legend()
plt.figure(2)
plt.xlabel(u'$x$', fontsize=26)
plt.ylabel(u'$Tx$', fontsize=26, rotation=90)
plt.plot(x,Tx, label='Tx')
plt.plot(x,np.log10(abs(Tx-Txe)), label='Error')
plt.title(u'First Derivative Evaluation (NX = 200)')
plt.legend()
plt.figure(3)
plt.xlabel(u'$x$', fontsize=26)
plt.ylabel(u'$Txx$', fontsize=26, rotation=90)
plt.plot(x,Txx,label='Txx')
plt.plot(x,np.log10(abs(Txx-Txxe)),label='Error')
plt.title(u'Second Derivative Evaluation (NX = 200)')
plt.legend()
# Figures 4-5: error versus grid step on log-log axes, with reference slopes.
plt.figure(4)
plt.plot(pas, err_Tx, label = "err_Tx")
plt.plot(pas, 1/2*pas**(3/2), label = '$h^{3/2}$')
plt.xlabel('pas')
plt.xscale('log')
plt.yscale('log')
plt.title("Courbe d'erreur de la dérivée numérique en fonction du pas")
plt.legend()
plt.figure(5)
plt.plot(pas, err_Txx, label = "err_Txx")
plt.plot(pas, 1/5*pas, label = 'h')
plt.xlabel('pas')
plt.xscale('log')
plt.yscale('log')
plt.title("Courbe d'erreur de la dérivée seconde numérique en fonction du pas")
plt.legend()
plt.show()
| oungaounga/project21808112.github.io | AllCodesProjet1.py | AllCodesProjet1.py | py | 6,834 | python | fr | code | 0 | github-code | 1 | [
{
"api_name": "scipy.misc.derivative",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "scipy.misc",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "scipy.misc.derivative",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "scipy.misc",
... |
74736843234 | from unittest.mock import Mock
import datetime
import pytest
from pytest_lazyfixture import lazy_fixture
from boxsdk.util import datetime_formatter
@pytest.mark.parametrize(
    "valid_datetime_format",
    (
        "2035-03-04T10:14:24+14:00",
        "2035-03-04T10:14:24-04:00",
        lazy_fixture("mock_datetime_rfc3339_str"),
    ),
)
def test_leave_datetime_string_unchanged_when_rfc3339_formatted_str_provided(
    valid_datetime_format,
):
    """Strings already in RFC 3339 form must pass through unchanged."""
    formatted_str = datetime_formatter.normalize_date_to_rfc3339_format(
        valid_datetime_format
    )
    assert formatted_str == valid_datetime_format
@pytest.mark.parametrize(
    "other_datetime_format",
    (
        "2035-03-04T10:14:24.000+14:00",
        "2035-03-04 10:14:24.000+14:00",
        "2035/03/04 10:14:24.000+14:00",
        "2035/03/04T10:14:24+14:00",
        "2035/3/4T10:14:24+14:00",
        lazy_fixture('mock_timezone_aware_datetime_obj'),
    ),
)
def test_normalize_date_to_rfc3339_format_timezone_aware_datetime(
    other_datetime_format,
    mock_datetime_rfc3339_str,
):
    """Timezone-aware inputs in assorted formats must normalize to the canonical RFC 3339 string."""
    formatted_str = datetime_formatter.normalize_date_to_rfc3339_format(
        other_datetime_format
    )
    assert formatted_str == mock_datetime_rfc3339_str
@pytest.mark.parametrize(
    "timezone_naive_datetime",
    (
        "2035-03-04T10:14:24.000",
        "2035-03-04T10:14:24",
        lazy_fixture('mock_timezone_naive_datetime_obj')
    ),
)
def test_add_timezone_info_when_timezone_naive_datetime_provided(
    timezone_naive_datetime,
    mock_timezone_naive_datetime_obj,
):
    """Naive datetimes must gain the local timezone during normalization."""
    formatted_str = datetime_formatter.normalize_date_to_rfc3339_format(
        timezone_naive_datetime
    )
    # datetime.now() is naive, so tzinfo is None here; astimezone(tz=None)
    # then interprets the naive fixture value in the local timezone.
    local_timezone = datetime.datetime.now().tzinfo
    expected_datetime = mock_timezone_naive_datetime_obj.astimezone(
        tz=local_timezone
    ).isoformat(timespec="seconds")
    assert formatted_str == expected_datetime
def test_return_none_when_none_provided():
    """A None input must be returned as-is."""
    assert datetime_formatter.normalize_date_to_rfc3339_format(None) is None
@pytest.mark.parametrize("inavlid_datetime_object", (Mock(),))
def test_throw_type_error_when_invalid_datetime_object_provided(
    inavlid_datetime_object,
):
    """Unsupported input types must raise TypeError."""
    with pytest.raises(TypeError):
        datetime_formatter.normalize_date_to_rfc3339_format(inavlid_datetime_object)
| box/box-python-sdk | test/unit/util/test_datetime_formatter.py | test_datetime_formatter.py | py | 2,309 | python | en | code | 395 | github-code | 1 | [
{
"api_name": "boxsdk.util.datetime_formatter.normalize_date_to_rfc3339_format",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "boxsdk.util.datetime_formatter",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 11,
... |
75068479392 | import bilby
import numpy as np
import matplotlib.pyplot as plt
from bilby.core.utils import logger
import scipy.special
import matplotlib
import json
from distutils.version import LooseVersion
import os
import copy
from ..PE.prior import Conditional_Dl2_prior_from_mu_rel_prior, condition_function
def MakeLensedParamsFromFirstEvent(img1_parameters, LensingParameters):
    """
    Build the injection parameters for the second image from the first
    image's parameters and the lensing parameters.

    ARGS:
    -----
    - img1_parameters: dictionary of parameters injected for the first event
    - LensingParameters: dictionary with the lensing parameters GOLUM should
                         recover (relative magnification, time delay, Morse
                         factor difference)

    RETURNS:
    --------
    - lensed_parameters: dictionary with lensing-adjusted parameters, usable
                         to inject the second event
    """
    second_image = img1_parameters.copy()
    # Apparent distance scales with the square root of the relative magnification.
    second_image['luminosity_distance'] = np.sqrt(LensingParameters['relative_magnification']) * img1_parameters['luminosity_distance']
    second_image['geocent_time'] = img1_parameters['geocent_time'] + LensingParameters['delta_t']
    second_image['n_phase'] = img1_parameters['n_phase'] + LensingParameters['delta_n']
    return second_image
def read_image1_file(file):
    """
    Read the JSON result file from the first run and return the injection
    parameters and the posterior samples.

    Used instead of bilby.result.read_in_result to keep backward
    compatibility with older result files; the bilby reader can replace it
    when the data was generated under the same conditions.

    ARGS:
    -----
    - file: name of the file (with full path) containing the results of the
            first run

    RETURNS:
    --------
    - injection_parameters: dictionary of injected parameters, None when the
                            file has none
    - posteriors: dictionary with the posterior samples of the first run,
                  without the log_likelihood/log_prior bookkeeping columns
    """
    with open(file) as f:
        result = json.load(f)
    injection_parameters = result['injection_parameters']
    if injection_parameters is None:
        print("WARNING: no injection parameters in file, returning NONE")
    posteriors = {key: value for key, value in result['posterior']['content'].items()}
    # Drop sampler bookkeeping columns; only physical parameters are kept.
    posteriors.pop('log_likelihood', None)
    posteriors.pop('log_prior', None)
    return injection_parameters, posteriors
def ReweighingEvent1(LensedResults, LensingLikelihood, ifos, waveform_generator, n_points, im1_posteriors, n_img):
    """
    Function doing the reweighing in practice. It can be used alone
    or is called by WeightsEvent1 where the info is also written into
    a json file.
    ARGS:
    -----
    - LensedResults: result object coming from the GOLUM run
    - LensingLikelihood: the GOLUM likelihood object used for the run
    - ifos: the interferometers used for the run
    - waveform_generator: waveform generator used for the run. It is
                          supposed to take LensedBBHmodel in as
                          frequency domain source model
    - n_points: int, the number of points to be used in the reweighing process
    - n_img: int, the number of the image used for the analysis (this is an
             identifier for the run)
    - im1_posteriors: path to the first image file. If None, we reuse the samples from
                      the golum run
    RETURNS:
    --------
    Outdict with keys:
    - 'RefImgSamples%i'%n_img: the dictionary of reweighed samples
                               obtained via the reweighing process for the reference image
    - 'GolumSamples%i'%n_img: the dictionary with the reweighed GOLUM samples
    """
    # Random draws from the GOLUM (second image) posterior.
    idxs = np.random.randint(0, len(LensedResults.posterior['delta_t']), n_points)
    # sample the events
    samps_ev2 = {key : LensedResults.posterior[key][idxs].to_list() for key in ['delta_t', 'delta_n',
                 'relative_magnification', 'log_likelihood']}
    # make the object needed to analyze the first image
    Ev1data = bilby.gw.GravitationalWaveTransient(ifos,
                                                  waveform_generator= waveform_generator)
    # draw random samples to evaluate the second likelihood
    if im1_posteriors is not None:
        results_1 = bilby.result.read_in_result(filename = im1_posteriors)
        samples_ev1 = results_1.posterior.copy()
        samples_ev1.pop('log_prior')
        samples_ev1.pop('log_likelihood')
    else:
        samples_ev1 = LensingLikelihood.samples.copy()
    idx2 = np.random.randint(0, len(samples_ev1['geocent_time']), n_points)
    LogL_Pd1 = np.zeros(n_points)
    # For each paired draw (image-1 sample, GOLUM sample), evaluate the
    # image-2 likelihood at the lensing-shifted image-1 parameters.
    for i in range(len(idx2)):
        if i%1000 == 0:
            print("Reweighing sample %i / %i"%(i, len(idx2)))
        ind1 = idxs[i]
        ind2 = idx2[i]
        for key in samples_ev1.keys():
            Ev1data.parameters[key] = samples_ev1[key][ind2]
        # Apply the lensing transformation (Morse phase, magnification, time delay).
        Ev1data.parameters['n_phase'] = samps_ev2['delta_n'][ind1] + Ev1data.parameters['n_phase']
        Ev1data.parameters['luminosity_distance'] = np.sqrt(samps_ev2['relative_magnification'][ind1])*\
                                                    Ev1data.parameters['luminosity_distance']
        Ev1data.parameters['geocent_time'] = samps_ev2['delta_t'][ind1] + Ev1data.parameters['geocent_time']
        LogL_Pd1[i] = Ev1data.log_likelihood()
    LogL_marg = np.array(samps_ev2['log_likelihood'])
    # Importance weights, shifted by the max log-ratio for numerical stability.
    weights = np.exp((LogL_Pd1-LogL_marg) - np.max(LogL_Pd1-LogL_marg))
    # save the samples selected from the first event
    SampsImg1 = dict()
    for key in samples_ev1.keys():
        SampsImg1[key] = [samples_ev1[key][ind] for ind in idx2]
    # save the samples for the GOLUM part
    SampsGolum = dict()
    for key in LensedResults.posterior:
        SampsGolum[key] = [LensedResults.posterior[key][ind] for ind in idxs]
    # make the dictionary with reweighed samples
    inds = len(SampsImg1['geocent_time'])
    # Resample both sample sets with the importance weights.
    IdxsRew = np.random.choice(inds, size = n_points, p = weights/np.sum(weights))
    Img1RewSamp = dict()
    for key in SampsImg1.keys():
        Img1RewSamp[key] = list(np.array(SampsImg1[key])[IdxsRew])
    GolRewSamp = dict()
    for key in SampsGolum.keys():
        GolRewSamp[key] = list(np.array(SampsGolum[key])[IdxsRew])
    outDict = {'RefImgSamples%i'%n_img : Img1RewSamp,
               'GolumSamples%i'%n_img : GolRewSamp}
    return outDict
def ReweighingEvent1_Dl2Free(LensedResults, LensingLikelihood, ifos, waveform_generator, n_points, im1_posteriors, n_img):
    """
    Same as the other function, just for the other parametrization
    (luminosity distance, time and phase of the second image sampled
    directly instead of the relative lensing parameters).

    Function doing the reweighing in practice. It can be used alone
    or is called by WeightsEvent1 where the info is also written into
    a json file.

    ARGS:
    -----
    - LensedResults: result object coming from the GOLUM run
    - LensingLikelihood: the GOLUM likelihood object used for the run
    - ifos: the interferometers used for the run
    - waveform_generator: waveform generator used for the run. It is
                          supposed to take LensedBBHmodel in as
                          frequency domain source model
    - n_points: int, the number of points to be used in the reweighing process
    - im1_posteriors: path to the first image file. If None, we reuse the
                      samples from the golum run
    - n_img: int, the number of the image used for the analysis (this is an
             identifier for the run)

    RETURNS:
    --------
    - outDict: a dictionary containing the reweighed samples.
               - 'RefImgSamples%i'%n_img: the dictionary of reweighed samples
                  obtained via the reweighing process for the reference image
               - 'GolumSamples%i'%n_img: the dictionary with the reweighed
                  GOLUM samples of the run
    """
    # random draw of GOLUM samples used in the reweighing
    idxs = np.random.randint(0, len(LensedResults.posterior['geocent_time']), n_points)

    # sample the events
    samps_ev2 = dict()
    for key in ['luminosity_distance', 'geocent_time', 'n_phase', 'log_likelihood']:
        samps_ev2[key] = [LensedResults.posterior[key][i] for i in idxs]

    # make the object needed to analyze the first image
    Ev1data = bilby.gw.GravitationalWaveTransient(ifos,
                                                  waveform_generator= waveform_generator)

    # draw random samples to evaluate the second likelihood
    if im1_posteriors is not None:
        results_1 = bilby.result.read_in_result(filename = im1_posteriors)
        samples_ev1 = results_1.posterior.copy()
        samples_ev1.pop('log_prior')
        samples_ev1.pop('log_likelihood')
    else:
        samples_ev1 = LensingLikelihood.samples.copy()

    idx2 = np.random.randint(0, len(samples_ev1['geocent_time']), n_points)

    LogL_Pd1 = np.zeros(n_points)
    for i in range(len(idx2)):
        if i%1000 == 0:
            print("Reweighing sample %i / %i"%(i, len(idx2)))
        ind1 = idxs[i]
        ind2 = idx2[i]
        for key in samples_ev1.keys():
            Ev1data.parameters[key] = samples_ev1[key][ind2]
        # in this parametrization the image-2 values are sampled directly,
        # so they replace (and do not shift) the image-1 values
        Ev1data.parameters['n_phase'] = samps_ev2['n_phase'][ind1]
        Ev1data.parameters['luminosity_distance'] = samps_ev2['luminosity_distance'][ind1]
        Ev1data.parameters['geocent_time'] = samps_ev2['geocent_time'][ind1]
        LogL_Pd1[i] = Ev1data.log_likelihood()

    # importance weights, normalized by the maximum for numerical stability
    LogL_marg = np.array(samps_ev2['log_likelihood'])
    weights = np.exp((LogL_Pd1-LogL_marg) - np.max(LogL_Pd1-LogL_marg))

    # save the samples selected from the first event
    SampsImg1 = dict()
    for key in samples_ev1.keys():
        SampsImg1[key] = [samples_ev1[key][ind] for ind in idx2]

    # save the samples for the GOLUM part
    SampsGolum = dict()
    for key in LensedResults.posterior:
        SampsGolum[key] = [LensedResults.posterior[key][ind] for ind in idxs]

    # make the dictionary with reweighed samples
    inds = len(SampsImg1['geocent_time'])
    IdxsRew = np.random.choice(inds, size = n_points, p = weights/np.sum(weights))
    Img1RewSamp = dict()
    for key in SampsImg1.keys():
        # cast back to plain lists so the output stays JSON-serializable
        Img1RewSamp[key] = list(np.array(SampsImg1[key])[IdxsRew])
    GolRewSamp = dict()
    for key in SampsGolum.keys():
        GolRewSamp[key] = list(np.array(SampsGolum[key])[IdxsRew])

    # bug fix: the reference-image entry used to be `Img1RewSamp.to`
    # (a dict has no attribute `to`), which raised an AttributeError;
    # also dropped the unused Weights/LogLMarg tolist() locals
    outDict = {'RefImgSamples%i'%n_img : Img1RewSamp,
               'GolumSamples%i'%n_img : GolRewSamp}

    return outDict
def read_reweighted_posterior(file, n_img = 2):
    """
    Function to read the posterior files

    ARGS:
    -----
    - file: the file in which the posteriors are stored
    - n_img: the image for which the posteriors should be read

    RETURNS:
    --------
    - RefImgSamples: the reweighted samples for the
                     reference image
    - GolumSamples: the reweighed golum samples for the
                    run under consideration

    RAISES:
    -------
    - NameError: if the file does not exist, or does not contain
                 samples for the requested image (NameError is kept
                 for backward compatibility with existing callers)
    """
    if not os.path.isfile(file):
        raise NameError('File %s not found'%file)

    with open(file) as f:
        result = json.load(f)

    if ('RefImgSamples%i'%(n_img)) not in result.keys():
        # typo fix: the message used to read "imge"
        raise NameError("The image %i does not exist in file"%n_img)

    # correct image and file are there
    RefImgSamples = result['RefImgSamples%i'%n_img]
    GolumSamples = result['GolumSamples%i'%n_img]
    return RefImgSamples, GolumSamples
def WeightsEvent1(LensedResults, LensingLikelihood, ifos, waveform_generator, outdir, label, im1_posteriors = None, n_points = int(1e5), dl2_free = False, n_img = 2):
    """
    Function reweighing the posteriors and saving the information into
    the reweighing file. It automatically adds the information in the
    existing file (if several images) or makes the file if it is not existing yet.
    If the reweighing has already been done with the same outdir, label and image
    number, these values are given back.

    ARGS:
    -----
    - LensedResults: result object coming from the GOLUM run
    - LensingLikelihood: the GOLUM likelihood object used for the run
    - ifos: the interferometers used for the run
    - waveform_generator: waveform generator used for the run. It is
                          supposed to take LensedBBHmodel in as
                          frequency domain source model
    - outdir: the outdirectory of the run
    - label: label used for the run
    - im1_posteriors: path to the first-image posterior file, or None to
                      reuse the samples from the GOLUM run
    - n_points: int, the number of points to be used in the reweighing process
    - dl2_free: bool. If True, the run sampled the second-image parameters directly
    - n_img: int, the number of the image used for the analysis (this is an
             identifier for the run)

    RETURNS:
    --------
    - RefImgSamples: The reweighed samples for the reference image
    - GolumSamples: the reweighed samples for the GOLUM run
    """
    reweight_file = '%s/%s_reweight.json' % (outdir, label)

    # first check whether the reweighing has already been done
    if os.path.isfile(reweight_file):
        with open(reweight_file) as f:
            result = json.load(f)

        if 'RefImgSamples%i'%n_img in result.keys():
            print("The run has already been done, LOADING the samples")
            RefImgSamples = result['RefImgSamples%i'%n_img]
            GolumSamples = result['GolumSamples%i'%n_img]
            return RefImgSamples, GolumSamples
        else:
            print("File already exists but no samples for image %i, computing samples for this image"%n_img)
            if dl2_free:
                outDict = ReweighingEvent1_Dl2Free(LensedResults, LensingLikelihood, ifos, waveform_generator, n_points, im1_posteriors, n_img)
            else:
                outDict = ReweighingEvent1(LensedResults, LensingLikelihood, ifos, waveform_generator, n_points, im1_posteriors, n_img)

            # add the new reweighed samples to the file
            with open(reweight_file, 'r+') as f:
                out = json.load(f)
                out.update(outDict)
                f.seek(0)
                json.dump(out, f, indent = 4)
                # bug fix: truncate after rewriting, otherwise stale bytes
                # from a longer previous JSON document stay at the end of
                # the file and corrupt it (also removed the redundant
                # f.close() inside the with-block)
                f.truncate()

            return outDict['RefImgSamples%i'%n_img], outDict['GolumSamples%i'%n_img]
    else:
        print("File %s/%s_reweight.json does not exist"%(outdir, label))
        if dl2_free:
            outDict = ReweighingEvent1_Dl2Free(LensedResults, LensingLikelihood, ifos, waveform_generator, n_points, im1_posteriors, n_img)
        else:
            outDict = ReweighingEvent1(LensedResults, LensingLikelihood, ifos, waveform_generator, n_points, im1_posteriors, n_img)

        # need to make the file
        with open(reweight_file, 'w') as f:
            json.dump(outDict, f, indent = 4)

        return outDict['RefImgSamples%i'%n_img], outDict['GolumSamples%i'%n_img]
def read_in_reweighted(file, n_img = 2):
    """
    Utility function to read in the reweighed posteriors
    from a file.

    ARGS:
    -----
    - file: the file that should be read
    - n_img: (default is 2): the image for which the posteriors
             are wanted

    RETURNS:
    --------
    - the reweighed reference-image samples and the reweighed GOLUM
      samples stored under image n_img
    """
    with open(file) as f:
        out = json.load(f)
    # bug fix: the GOLUM key used the undefined name `i` instead of
    # n_img, which raised a NameError on every call
    return out['RefImgSamples%i'%n_img], out['GolumSamples%i'%n_img]
| lemnis12/golum | golum/Tools/utils.py | utils.py | py | 16,015 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.sqrt",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_n... |
8556882106 | from .settings import DATABASE, PATH
import glob
import os
import pandas as pd
import sqlalchemy as sqla
import argparse
from argparse import RawDescriptionHelpFormatter
# ### 1.1 Dataset
# numeric identifier of each surveillance dataset (order defines the id)
dataset_id = {
    name: code
    for code, name in enumerate(
        ['srag', 'sragflu', 'obitoflu', 'sragcovid', 'obitocovid', 'obito'],
        start=1,
    )
}

# ### 1.2 Scale
scale_id = {
    name: code
    for code, name in enumerate(['incidência', 'casos'], start=1)
}

# ### 1.3 Situation
situation_id = {
    name: code
    for code, name in enumerate(
        ['unknown', 'estimated', 'stable', 'incomplete'],
        start=1,
    )
}

# ### 1.4 Territory Type
territory_type_id = {
    name: code
    for code, name in enumerate(['Estado', 'Regional', 'Região', 'País'], start=1)
}

# ### 1.5 Region id conversion
# textual region codes -> numeric territory ids (surveillance regionals
# use the 1000-range, 9999/9 mark "not informed")
region_id = dict(
    BR=0,
    RegN=1001,
    RegL=1002,
    RegC=1003,
    RegS=1004,
    RegNI=9999,
    N=1,
    NE=2,
    SE=3,
    S=4,
    CO=5,
    RNI=9,
)
# ### Territory Table
# Static lookup of every territory the system reports on: the 27
# Brazilian federal units plus a "not informed" unit (territory_type 1),
# the country itself (type 4), the influenza surveillance regionals
# (type 2, 1000-range ids) and the geo-political regions (type 3).
# territory_type_id values match the territory_type_id mapping above.
df_territory = pd.DataFrame([
    {'id': 11, 'initials': 'RO', 'name': 'Rondônia',
     'territory_type_id': 1},
    {'id': 12, 'initials': 'AC', 'name': 'Acre', 'territory_type_id': 1},
    {'id': 13, 'initials': 'AM', 'name': 'Amazonas',
     'territory_type_id': 1},
    {'id': 14, 'initials': 'RR', 'name': 'Roraima',
     'territory_type_id': 1},
    {'id': 15, 'initials': 'PA', 'name': 'Pará', 'territory_type_id': 1},
    {'id': 16, 'initials': 'AP', 'name': 'Amapá', 'territory_type_id': 1},
    {'id': 17, 'initials': 'TO', 'name': 'Tocantins',
     'territory_type_id': 1},
    {'id': 21, 'initials': 'MA', 'name': 'Maranhão',
     'territory_type_id': 1},
    {'id': 22, 'initials': 'PI', 'name': 'Piauí', 'territory_type_id': 1},
    {'id': 23, 'initials': 'CE', 'name': 'Ceará', 'territory_type_id': 1},
    {'id': 24, 'initials': 'RN', 'name': 'Rio Grande do Norte',
     'territory_type_id': 1},
    {'id': 25, 'initials': 'PB', 'name': 'Paraíba',
     'territory_type_id': 1},
    {'id': 26, 'initials': 'PE', 'name': 'Pernambuco',
     'territory_type_id': 1},
    {'id': 27, 'initials': 'AL', 'name': 'Alagoas',
     'territory_type_id': 1},
    {'id': 28, 'initials': 'SE', 'name': 'Sergipe',
     'territory_type_id': 1},
    {'id': 29, 'initials': 'BA', 'name': 'Bahia', 'territory_type_id': 1},
    {'id': 31, 'initials': 'MG', 'name': 'Minas Gerais',
     'territory_type_id': 1},
    {'id': 32, 'initials': 'ES', 'name': 'Espírito Santo',
     'territory_type_id': 1},
    {'id': 33, 'initials': 'RJ', 'name': 'Rio de Janeiro',
     'territory_type_id': 1},
    {'id': 35, 'initials': 'SP', 'name': 'São Paulo',
     'territory_type_id': 1},
    {'id': 41, 'initials': 'PR', 'name': 'Paraná', 'territory_type_id': 1},
    {'id': 42, 'initials': 'SC', 'name': 'Santa Catarina',
     'territory_type_id': 1},
    {'id': 43, 'initials': 'RS', 'name': 'Rio Grande do Sul',
     'territory_type_id': 1},
    {'id': 50, 'initials': 'MS', 'name': 'Mato Grosso do Sul',
     'territory_type_id': 1},
    {'id': 51, 'initials': 'MT', 'name': 'Mato Grosso',
     'territory_type_id': 1},
    {'id': 52, 'initials': 'GO', 'name': 'Goiás', 'territory_type_id': 1},
    {'id': 53, 'initials': 'DF', 'name': 'Distrito Federal',
     'territory_type_id': 1},
    {'id': 99, 'initials': 'NI', 'name': 'Não informado',
     'territory_type_id': 1},
    {'id': 0, 'initials': 'BR', 'name': 'Brasil', 'territory_type_id': 4},
    {'id': 1003, 'initials': 'RegC', 'name': 'Regional Centro',
     'territory_type_id': 2},
    {'id': 1002, 'initials': 'RegL', 'name': 'Regional Leste',
     'territory_type_id': 2},
    {'id': 1001, 'initials': 'RegN', 'name': 'Regional Norte',
     'territory_type_id': 2},
    {'id': 1004, 'initials': 'RegS', 'name': 'Regional Sul',
     'territory_type_id': 2},
    {'id': 9999, 'initials': 'RegSNI', 'name': 'Regional não informada',
     'territory_type_id': 2},
    {'id': 1, 'initials': 'N', 'name': 'Norte', 'territory_type_id': 3},
    {'id': 2, 'initials': 'NE', 'name': 'Nordeste',
     'territory_type_id': 3},
    {'id': 3, 'initials': 'SE', 'name': 'Sudeste', 'territory_type_id': 3},
    {'id': 5, 'initials': 'CO', 'name': 'Centro-oeste',
     'territory_type_id': 3},
    {'id': 4, 'initials': 'S', 'name': 'Sul', 'territory_type_id': 3},
    {'id': 9, 'initials': 'RNI', 'name': 'Região não informada',
     'territory_type_id': 3},
])
# human-readable label for each contingency-plan level code
contingency_name_from_id = {0: 'Nível basal'}
contingency_name_from_id.update(
    {code: 'Nível %d' % (code - 1) for code in (1, 2, 3)}
)
def update_data_files(force: bool):
    """Download (or refresh) the surveillance data files from the
    FluVigilanciaBR/data GitHub repository into ../../data/data.

    - force: when True use `wget -N` (re-download when the remote file is
      newer); otherwise `wget -nc` (skip files that already exist locally)
    """
    path_data = os.path.join(PATH, '../../data/data')
    update_params = '-nc' if not force else '-N'
    wget_prefix = (
        ('wget %s ' % update_params) +
        'https://raw.githubusercontent.com/FluVigilanciaBR/data/master/data'
    )
    # single shell command chaining one wget per file; the exact string
    # layout (';' after br-states.json, '&&' between the CSVs, backslash
    # continuations) is intentional and must not be reformatted
    command = '''cd %(path_data)s; \
%(wget_prefix)s/br-states.json; \
%(wget_prefix)s/clean_data_epiweek-weekly-incidence_w_situation.csv && \
%(wget_prefix)s/contingency_level.csv && \
%(wget_prefix)s/current_estimated_values.csv && \
%(wget_prefix)s/historical_estimated_values.csv && \
%(wget_prefix)s/mem-report.csv && \
%(wget_prefix)s/mem-typical.csv && \
%(wget_prefix)s/season_level.csv && \
%(wget_prefix)s/weekly_alert.csv && \
%(wget_prefix)s/delay_table.csv''' % {
        'path_data': path_data,
        'wget_prefix': wget_prefix
    }
    # echo a more readable version of the command before running it
    print(command.replace('&&', ' && \ \n'))
    os.system(command)
    print('[II] DONE!')
def get_filename_from_path(file_path: str):
    """Return the base name of *file_path* without directories and
    without its extension(s).

    e.g. 'data/data/mem-report.csv' -> 'mem-report'
    """
    # os.path.basename is more robust than splitting on os.path.sep by
    # hand (handles trailing separators and mixed separators per platform)
    return os.path.basename(file_path).split('.')[0]
def migrate_current_estimates(df):
    """Prepare the current-estimates CSV frame for the SQL
    `current_estimated_values` table.

    Renames the legacy/Portuguese columns, maps the categorical columns
    (dataset, scale, situation, territory) to their numeric ids, drops
    the redundant territory-type column and indexes the frame by its
    primary key.  NOTE: the frame is modified in place and also returned.
    """
    # CSV column -> SQL column
    migration_rules = {
        'UF': 'territory_id',
        'SRAG': 'value',
        'Tipo': 'territory_type',  # Not needed in the table
        'Situation': 'situation_id',
        'rolling_average': 'rolling_average',
        '50%': 'median',
        '2.5%': 'ci_lower',
        '97.5%': 'ci_upper',
        '5%': 'ci_lower_90',
        '95%': 'ci_upper_90',
        '25%': 'ci_lower_q1',
        '75%': 'ci_upper_q3',
        'bounded_97.5%': 'ci_upper_bounded',
        'cntry_percentage': 'country_percentage',
        'L0': 'low_level',
        'L1': 'epidemic_level',
        'L2': 'high_level',
        'L3': 'very_high_level',
        'Run date': 'run_date',
        'dado': 'dataset_id',
        'escala': 'scale_id'
    }
    # rename columns
    df.rename(
        columns=migration_rules, inplace=True
    )
    # apply categories (module-level name -> id mappings)
    df.dataset_id = df.dataset_id.map(dataset_id)
    df.scale_id = df.scale_id.map(scale_id)
    df.situation_id = df.situation_id.map(situation_id)
    # country/regional rows carry textual codes; map them to numeric ids
    # before the astype(int) below (which would fail on strings)
    regions_indeces = df.territory_id.isin([
        'BR', 'RegN', 'RegL', 'RegC', 'RegS', 'RegNI',
        'N', 'NE', 'SE', 'S', 'CO', 'RNI'
    ])
    df.loc[regions_indeces, 'territory_id'] = df.loc[
        regions_indeces, 'territory_id'
    ].map(region_id)
    df.territory_id = df.territory_id.astype(int)
    # remove unnecessary fields
    df.drop(['territory_type'], axis=1, inplace=True)
    # primary_keys
    pks = ['dataset_id', 'scale_id', 'territory_id', 'epiyear',
           'epiweek']
    df.set_index(pks, inplace=True)
    return df
def migrate_historical_estimates(df):
    """Prepare the historical-estimates CSV frame for the SQL
    `historical_estimated_values` table.

    Same normalization as migrate_current_estimates, but the primary key
    additionally includes the base epiweek/epiyear the estimate was made
    from.  NOTE: the frame is modified in place and also returned.
    """
    # CSV column -> SQL column
    migration_rules = {
        'UF': 'territory_id',
        'SRAG': 'value',
        'Tipo': 'territory_type',  # Not needed in the table
        'Situation': 'situation_id',
        '50%': 'median',
        '2.5%': 'ci_lower',
        '97.5%': 'ci_upper',
        '5%': 'ci_lower_90',
        '95%': 'ci_upper_90',
        '25%': 'ci_lower_q1',
        '75%': 'ci_upper_q3',
        'bounded_97.5%': 'ci_upper_bounded',
        'cntry_percentage': 'country_percentage',
        'L0': 'low_level',
        'L1': 'epidemic_level',
        'L2': 'high_level',
        'L3': 'very_high_level',
        'Run date': 'run_date',
        'dado': 'dataset_id',  # or origin
        'escala': 'scale_id'
    }
    df.rename(
        columns=migration_rules, inplace=True
    )
    # apply categories
    df.dataset_id = df.dataset_id.map(dataset_id)
    df.scale_id = df.scale_id.map(scale_id)
    df.situation_id = df.situation_id.map(situation_id)
    # map textual country/regional codes to numeric ids before astype(int)
    regions_indeces = df.territory_id.isin([
        'BR', 'RegN', 'RegL', 'RegC', 'RegS', 'RegNI',
        'N', 'NE', 'SE', 'S', 'CO', 'RNI'
    ])
    df.loc[regions_indeces, 'territory_id'] = df.loc[
        regions_indeces, 'territory_id'
    ].map(region_id)
    df.territory_id = df.territory_id.astype(int)
    # remove unnecessary fields
    df.drop(['territory_type'], axis=1, inplace=True)
    # primary_keys
    pks = [
        'dataset_id', 'scale_id', 'territory_id',
        'base_epiyear', 'base_epiweek',
        'epiyear', 'epiweek'
    ]
    df.set_index(pks, inplace=True)
    # bug fix: removed a stray `df.head()` call whose result was
    # computed and silently discarded (dead code)
    return df
def migrate_clean_data_epiweek(df):
    """Prepare the weekly clean-data CSV frame for the SQL table with
    per-epiweek counts broken down by age band, gender and lab result.

    Renames the Portuguese/legacy columns, maps the categorical columns
    to numeric ids, drops the redundant descriptive columns and indexes
    the frame by its primary key.  NOTE: modified in place and returned.
    """
    # CSV column -> SQL column (age bands, test outcomes, metadata)
    migration_rules = {
        '0-4 anos': 'years_0_4',
        '10-19 anos': 'years_10_19',
        '2-4 anos': 'years_2_4',
        '20-29 anos': 'years_20_29',
        '30-39 anos': 'years_30_39',
        '40-49 anos': 'years_40_49',
        '5-9 anos': 'years_5_9',
        '50-59 anos': 'years_50_59',
        '60+ anos': 'years_60_or_more',
        '< 2 anos': 'years_lt_2',
        'DELAYED': 'delayed',
        'FLU_A': 'flu_a',
        'FLU_B': 'flu_b',
        'INCONCLUSIVE': 'inconclusive',
        'Idade desconhecida': 'unknown_age',
        'NEGATIVE': 'negative',
        'NOTTESTED': 'not_tested',
        'OTHERS': 'others',
        'POSITIVE_CASES': 'positive_cases',
        'SRAG': 'value',
        'Situation': 'situation_id',
        'TESTING_IGNORED': 'testing_ignored',
        'Tipo': 'territory_type',  # Not needed in the table
        'UF': 'territory_id',
        'Unidade da Federação': 'state_country_name',  # Not needed
        'VSR': 'vsr',
        'dado': 'dataset_id',
        'escala': 'scale_id',
        'sexo': 'gender'
    }
    df.rename(
        columns=migration_rules, inplace=True
    )
    # apply categories
    df.dataset_id = df.dataset_id.map(dataset_id)
    df.scale_id = df.scale_id.map(scale_id)
    df.situation_id = df.situation_id.map(situation_id)
    # map textual country/regional codes to numeric ids before astype(int)
    regions_indeces = df.territory_id.isin([
        'BR', 'RegN', 'RegL', 'RegC', 'RegS', 'RegNI',
        'N', 'NE', 'SE', 'S', 'CO', 'RNI'
    ])
    df.loc[regions_indeces, 'territory_id'] = df.loc[
        regions_indeces, 'territory_id'
    ].map(region_id)
    df.territory_id = df.territory_id.astype(int)
    # remove unnecessary fields
    df.drop([
        'state_country_name', 'territory_type'
    ], axis=1, inplace=True)
    # primary_keys
    pks = ['dataset_id', 'scale_id', 'territory_id', 'epiyear',
           'epiweek']
    df.set_index(pks, inplace=True)
    return df
def migrate_mem_report(df):
    """Prepare the MEM report CSV frame (epidemic thresholds and typical
    season timing per territory and year) for the SQL `mem_report` table.

    NOTE: the frame is modified in place and also returned.
    """
    # CSV column (Portuguese MEM terminology) -> SQL column
    migration_rules = {
        'UF': 'territory_id',
        'População': 'population',
        'Média geométrica do pico de infecção das temporadas regulares':
            'geom_average_peak',
        'região de baixa atividade típica': 'low_activity_region',
        'limiar pré-epidêmico': 'pre_epidemic_threshold',
        'intensidade alta': 'high_threshold',
        'intensidade muito alta': 'very_high_threshold',
        'SE típica do início do surto': 'epi_start',
        'SE típica do início do surto - IC inferior (2,5%)':
            'epi_start_ci_lower',
        'SE típica do início do surto - IC superior (97,5%)':
            'epi_start_ci_upper',
        'duração típica do surto': 'epi_duration',
        'duração típica do surto - IC inferior (2,5%)':
            'epi_duration_ci_lower',
        'duração típica do surto - IC superior (97,5%)':
            'epi_duration_ci_upper',
        'temporadas utilizadas para os corredores endêmicos':
            'regular_seasons',
        'ano': 'year',
        'Unidade da Federação': 'state_country_name',  # Not needed ...
        'Tipo': 'territory_type',  # Not needed in the table
        'dado': 'dataset_id',
        'escala': 'scale_id'
    }
    df.rename(
        columns=migration_rules, inplace=True
    )
    # apply categories
    df.dataset_id = df.dataset_id.map(dataset_id)
    df.scale_id = df.scale_id.map(scale_id)
    # map textual country/regional codes to numeric ids before astype(int)
    regions_indeces = df.territory_id.isin([
        'BR', 'RegN', 'RegL', 'RegC', 'RegS', 'RegNI',
        'N', 'NE', 'SE', 'S', 'CO', 'RNI'
    ])
    df.loc[regions_indeces, 'territory_id'] = df.loc[
        regions_indeces, 'territory_id'
    ].map(region_id)
    df.territory_id = df.territory_id.astype(int)
    # remove unnecessary fields
    df.drop([
        'state_country_name', 'territory_type'
    ], axis=1, inplace=True)
    # primary_keys
    pks = ['dataset_id', 'scale_id', 'territory_id', 'year']
    df.set_index(pks, inplace=True)
    return df
def migrate_mem_typical(df):
    """Prepare the MEM typical-activity-corridor CSV frame (low/median/
    high endemic channels per territory, year and epiweek) for SQL.

    NOTE: the frame is modified in place and also returned.
    """
    # CSV column -> SQL column
    migration_rules = {
        'UF': 'territory_id',
        'População': 'population',
        'corredor baixo': 'low',
        'corredor mediano': 'median',
        'corredor alto': 'high',
        'ano': 'year',
        'Unidade da Federação': 'state_country_name',  # Not needed ...
        'Tipo': 'territory_type',  # Not needed in the table
        'dado': 'dataset_id',
        'escala': 'scale_id'
    }
    df.rename(
        columns=migration_rules, inplace=True
    )
    # apply categories
    df.dataset_id = df.dataset_id.map(dataset_id)
    df.scale_id = df.scale_id.map(scale_id)
    # map textual country/regional codes to numeric ids before astype(int)
    regions_indeces = df.territory_id.isin([
        'BR', 'RegN', 'RegL', 'RegC', 'RegS', 'RegNI',
        'N', 'NE', 'SE', 'S', 'CO', 'RNI'
    ])
    df.loc[regions_indeces, 'territory_id'] = df.loc[
        regions_indeces, 'territory_id'
    ].map(region_id)
    df.territory_id = df.territory_id.astype(int)
    # remove unnecessary fields
    df.drop([
        'state_country_name', 'territory_type'
    ], axis=1, inplace=True)
    # primary_keys
    pks = ['dataset_id', 'scale_id', 'territory_id', 'year',
           'epiweek']
    df.set_index(pks, inplace=True)
    return df
def migrate_delay_table(df):
    """Prepare the notification-delay CSV frame for the SQL `delay_table`.

    Keeps only the day-resolution delay columns (the week-resolution
    duplicates and the raw epiweek bookkeeping columns are dropped) and
    indexes by a synthetic row id plus territory/epiyear/epiweek.
    NOTE: the frame is modified in place and also returned.
    """
    # CSV column -> SQL column; delays are in days between the two events
    # named on each side of the '2'
    migration_rules = {
        'UF': 'territory_id',
        'SinPri2Interna_DelayDays': 'symptoms2hospitalization',
        'Interna2Evoluca_DelayDays': 'hospitalization2evolution',
        'Notific2Digita_DelayDays': 'notification2digitalization',
        'SinPri2Digita_DelayDays': 'symptoms2digitalization',
        'SinPri2Antivir_DelayDays': 'symptoms2antiviral',
        'SinPri2Notific_DelayDays': 'symptoms2notification',
        'SinPri2Coleta_DelayDays': 'symptoms2sample',
        'Notific2Encerra_DelayDays': 'notification2closure',
        'Coleta2IFI_DelayDays': 'sample2ifi',
        'Coleta2PCR_DelayDays': 'sample2pcr',
        'Regional': 'regional',
        'Regiao': 'region',
        'dado': 'dataset_id'
    }
    df.rename(columns=migration_rules, inplace=True)
    # apply categories
    df.dataset_id = df.dataset_id.map(dataset_id)
    df.territory_id = df.territory_id.astype(int)
    df.regional = df.regional.map(region_id).astype(int)
    df.region = df.region.map(region_id).astype(int)
    # remove unnecessary fields (week-resolution and bookkeeping columns)
    df.drop([
        'DT_SIN_PRI_epiyearweek',
        'DT_SIN_PRI_epiweek',
        'DT_SIN_PRI_epiyear',
        'DT_DIGITA_epiyearweek',
        'DT_DIGITA_epiyear',
        'DT_DIGITA_epiweek',
        'Notific2Digita_DelayWeeks',
        'SinPri2Digita_DelayWeeks',
        'SinPri2Antivir_DelayWeeks',
        'SinPri2Notific_DelayWeeks',
        'SinPri2Coleta_DelayWeeks',
        'SinPri2Interna_DelayWeeks',
        'Interna2Evoluca_DelayWeeks',
        'Notific2Encerra_DelayWeeks',
        'Coleta2IFI_DelayWeeks',
        'Coleta2PCR_DelayWeeks',
        'Notific2Coleta_DelayWeeks',
        'Notific2Antivir_DelayWeeks',
        'Digita2Antivir_DelayWeeks',
        'Notific2Coleta_DelayDays',
        'Notific2Antivir_DelayDays',
        'Digita2Antivir_DelayDays',
    ], axis=1, inplace=True)
    # add index: several rows can share territory/epiyear/epiweek, so a
    # synthetic id keeps the composite key unique
    df['id'] = df.index
    # primary keys
    pks = ['id', 'territory_id', 'epiyear', 'epiweek']
    df.set_index(pks, inplace=True)
    return df
def migrate_contingency_level(df):
    """Index the contingency-level frame by its natural key
    (one record per territory and epidemiological year); the frame is
    modified in place and also returned."""
    key_columns = ['territory_id', 'epiyear']
    df.set_index(key_columns, inplace=True)
    return df
def migrate_weekly_alert(df):
    """Index the weekly-alert frame by dataset, territory, year and
    epiweek; the frame is modified in place and also returned."""
    key_columns = ['dataset_id', 'territory_id', 'epiyear', 'epiweek']
    df.set_index(key_columns, inplace=True)
    return df
def migrate_season_level(df):
    """Index the season-level frame by dataset, territory and year;
    the frame is modified in place and also returned."""
    key_columns = ['dataset_id', 'territory_id', 'epiyear']
    df.set_index(key_columns, inplace=True)
    return df
def migrate_from_csv_to_psql(dfs=None, suff='', basic_tables=True):
    """Load the downloaded CSV files, normalize them and push every
    resulting frame into PostgreSQL (replacing the existing tables).

    - dfs: optional pre-loaded {name: DataFrame} mapping; when None, all
      CSVs under ../../data/data are read from disk
    - suff: suffix stripped from a table name when looking up its
      migration function in datasets_migration
    - basic_tables: when True, the small reference tables (dataset,
      situation, scale, territory_type, territory) are rebuilt too
    """
    if dfs is None:
        print('Data files:')
        dfs = {}
        path_data_files = os.path.join(PATH, '../../data/data', '*.csv')
        for file_path in glob.glob(path_data_files):
            filename = get_filename_from_path(file_path)
            print(filename)
            dfs[filename] = pd.read_csv(file_path)
    # per-table normalization function
    datasets_migration = {
        'current_estimated_values': migrate_current_estimates,
        'historical_estimated_values': migrate_historical_estimates,
        'clean_data_epiweek-weekly-incidence_w_situation': migrate_clean_data_epiweek,
        'mem-report': migrate_mem_report,
        'mem-typical': migrate_mem_typical,
        'delay_table': migrate_delay_table,
        'contingency_level': migrate_contingency_level,
        'weekly_alert': migrate_weekly_alert,
        'season_level': migrate_season_level
    }
    for k, df in dfs.items():
        print('Polishing table: %s' % k)
        dfs[k] = datasets_migration[k.replace(suff, '')](dfs[k])
        print('Done!')
    # ## 1. Setting IDs
    # ### 1.1 Datasets
    if basic_tables:
        # creating dataset dataframe
        df_dataset = pd.DataFrame({
            'id': list(dataset_id.values()),
            'name': list(dataset_id.keys())
        }).set_index('id')
        dfs['dataset'] = df_dataset
        # creating situation dataframe
        df_situation = pd.DataFrame({
            'id': list(situation_id.values()),
            'name': list(situation_id.keys())
        }).set_index('id')
        dfs['situation'] = df_situation
        # creating scale dataframe
        df_scale = pd.DataFrame({
            'id': list(scale_id.values()),
            'name': list(scale_id.keys())
        }).set_index('id')
        dfs['scale'] = df_scale
        # creating territory_type dataframe
        df_territory_type = pd.DataFrame({
            'id': list(territory_type_id.values()),
            'name': list(territory_type_id.keys())
        }).set_index('id')
        dfs['territory_type'] = df_territory_type
        # NOTE(review): this mutates the module-level df_territory in
        # place — calling this function twice with basic_tables=True
        # would raise on the second set_index; confirm intended usage
        df_territory.set_index('id', inplace=True)
        dfs['territory'] = df_territory
    # ## SQL Migration
    # NOTE(review): declared but never used — presumably intended as a
    # dtype override for to_sql; confirm before removing
    exception_type_field = {
        'run_date': 'DATE'
    }
    dsn = 'postgresql://%(USER)s:%(PASSWORD)s@%(HOST)s/%(NAME)s'
    engine = sqla.create_engine(dsn % DATABASE)
    for k, df in dfs.items():
        # table names cannot contain '-'
        k_new = k.replace('-', '_')
        print('Migrating %s ...' % k_new)
        # replace the existing table wholesale on every migration
        df.to_sql(
            k_new, engine, if_exists='replace',
            chunksize=2048
        )
if __name__ == '__main__':
    # command-line entry point: always refresh the CSV data files and,
    # when -d is given, reload them into the PostgreSQL database
    parser = argparse.ArgumentParser(description="Update datafiles and DB.\n",
                                     formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('-F', '--Force', help='Force download', action='store_true')
    parser.add_argument('-b', '--basic', help='Update base tables', action='store_true')
    parser.add_argument('-d', '--database', help='Update database', action='store_true')
    args = parser.parse_args()
    update_data_files(force=args.Force)
    if args.database:
        migrate_from_csv_to_psql(basic_tables=args.basic)
| FluVigilanciaBR/seasonality | methods/data_filter/migration.py | migration.py | py | 19,644 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "settings.PATH",
"line_number": 145,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line... |
from itertools import combinations
# Brute-force knapsack-style solver: read a budget `st`, an item count
# `c` and `c` whitespace-separated (cost, score) pairs; enumerate every
# non-empty item subset, keep those whose total cost fits the budget and
# print the best total score, the chosen subset size and the 1-based
# indices of the chosen items.
st=int(input())
c=int(input())
arr=[]
for x in range(0,c):
    arr.append(input().split())
arr2=[]
for y in range(0,len(arr)):
    arr[y]=[int(x) for x in arr[y]]
# NOTE(review): this pass looks like it is meant to keep, per cost
# value, the pair with the highest score, but it appends one entry for
# every (x, y) match so arr2 can contain duplicates — confirm against
# the original problem statement before changing
for x in range(0,len(arr)):
    cnt=arr[x][0]
    for y in range(0,len(arr)):
        if arr[y][0]==cnt:
            if arr[x][1]>arr[y][1]:
                tmp=arr[x]
            else:
                tmp=arr[y]
            arr2.append(tmp)
arr3=[]
arr4=[]
# enumerate every non-empty combination (exponential in len(arr2))
for x in range(1,len(arr2)+1):
    tmp=(list(combinations(arr2,x)))
    for y in tmp:
        arr3.append(y)
# keep only the combinations whose total cost fits the budget
for x in range(0,len(arr3)):
    cnt=0
    for y in range(0,len(arr3[x])):
        cnt=cnt+arr3[x][y][0]
    if cnt<=st:
        arr4.append(arr3[x])
best=0
bests=[]
# pick the feasible combination with the highest total score
for x in range(0,len(arr4)):
    cnt=0
    for y in range(0,len(arr4[x])):
        cnt=cnt+arr4[x][y][1]
    if best<=cnt:
        best=cnt
        bestindex=x
print(best)
print(len(arr4[bestindex]))
# map the chosen items back to their original 1-based input positions
for y in range(0,len(arr4[bestindex])):
    bests.append(arr.index(arr4[bestindex][y])+1)
for x in bests:
    print(x,end=' ')
| goltong1/NYPC | NYPC/2018/NYPC 08.py | NYPC 08.py | py | 1,058 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "itertools.combinations",
"line_number": 22,
"usage_type": "call"
}
] |
70007701475 | import collections
from collections import Counter, defaultdict
import numpy as np
import jsonlines
import os
import re
from typing import *
import torch
from torch import nn
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
torch.manual_seed(1792507)  # fixed seed so the random pad/unk/sep vectors are reproducible
from model import Model
# PRE-TRAINED EMBEDDING (Word2Vec Continuous Skipgram - English CoNLL17 corpus)
# only the first `words_limit` lines (most frequent words) are loaded
word_vectors = dict()
words_limit = 100_000
with open('model/word2vec.txt', 'r', encoding="ISO-8859-1") as f:
    next(f)  # skip header
    for i, line in enumerate(f):
        if i == words_limit:
            break
        word, *vector = line.strip().split(' ')
        vector = torch.tensor([float(c) for c in vector])
        word_vectors[word] = vector
# VECTORS_STORE AND WORD_INDEX TO DEAL WITH UNKNOWN AND PADDING
word_index = dict()
vectors_store = []
vectors_store.append(torch.rand(100))  # index = 0 -> pad token
vectors_store.append(torch.rand(100))  # index = 1 -> unk token
vectors_store.append(torch.rand(100))  # index = 2 -> separator token (between two sentences in second approach)
for word, vector in word_vectors.items():
    word_index[word] = len(vectors_store)  # real words start at index 3
    vectors_store.append(vector)
# unknown words fall back to index 1 (the unk vector)
word_index = defaultdict(lambda: 1, word_index)
vectors_store = torch.stack(vectors_store)
# line -> single feature vector for one sentence pair
def vectorize_line(line):
    """Encode a sentence pair as the mean of its word vectors.

    The two sentences are embedded one after the other, separated by the
    padding vector, and the whole sequence is averaged into one feature
    vector.  A dummy label of 0 is returned so the output keeps the
    (features, label) shape used at training time.
    """
    token_vectors = []
    vectorize_sentence('sentence1', line, token_vectors)
    # separator between the two sentences (pad vector, index 0)
    token_vectors.append(vectors_store[0])
    vectorize_sentence('sentence2', line, token_vectors)
    features = torch.mean(torch.stack(token_vectors), dim=0)
    # "fake" label kept only to preserve the (vector, label) structure
    return (features, 0)
# sentence -> append the word vectors of a single sentence
def vectorize_sentence (sentence: str, line, vector):
    """Append to *vector* the embedding of every usable word in line[sentence].

    Tokens are cleaned of punctuation/digits and lower-cased; tokens of
    length <= 3 are discarded; unknown words map to the <unk> vector.
    """
    raw_tokens = line[sentence].strip().split(' ')
    for raw in raw_tokens:
        cleaned = re.sub('[\.,:;!@#$\(\)\-&\\<>/0-9"”“]', '', raw).lower()
        if len(cleaned) > 3:
            vector.append(vectors_store[word_index[cleaned]])
def build_model(device: str) -> Model:
    """Instantiate the trained classifier, load its saved weights and
    move it to *device* (e.g. 'cpu' or 'cuda')."""
    # 100 input features (word2vec dimension), 1024 first hidden layer
    model = StudentModel(100, 1024)
    model.eval()  # inference mode: disables dropout
    # map_location lets a GPU-trained checkpoint load on CPU and vice versa
    model.load_state_dict(torch.load('model/saved_model_0.667.pt', map_location=torch.device(device)))
    return model.to(device)
class RandomBaseline(Model):
    """Baseline that predicts 'True'/'False' at random, weighted by the
    label counts observed in the training data."""

    options = [
        ('True', 40000),
        ('False', 40000),
    ]

    def __init__(self):
        labels, counts = zip(*self.options)
        self._options = list(labels)
        raw_weights = np.array(counts)
        self._weights = raw_weights / raw_weights.sum()

    def predict(self, sentence_pairs: List[Dict]) -> List[str]:
        predictions = []
        for _ in sentence_pairs:
            choice = np.random.choice(self._options, 1, p=self._weights)[0]
            predictions.append(str(choice))
        return predictions
class Model(torch.nn.Module):
    # NOTE(review): this shadows the `Model` imported from `model` at the
    # top of the file, so `StudentModel` below inherits from
    # torch.nn.Module through this shim rather than from the imported
    # base class — confirm this is intentional.
    pass
class StudentModel(Model):
    """Binary sentence-pair classifier: a 5-layer MLP with dropout over
    the mean word2vec embedding of a sentence pair.

    n_features: size of the input feature vector (100 = word2vec dim)
    n_hidden: width of the first hidden layer; each following layer halves it
    """
    def __init__(self, n_features: int, n_hidden: int):
        super().__init__()
        # Linear layers (funnel: n_hidden -> /2 -> /4 -> /8 -> 1)
        self.lin1 = torch.nn.Linear(n_features, n_hidden)
        self.lin2 = torch.nn.Linear(n_hidden, n_hidden//2)
        self.lin3 = torch.nn.Linear(n_hidden//2, n_hidden//4)
        self.lin4 = torch.nn.Linear(n_hidden//4, n_hidden//8)
        self.lin5 = torch.nn.Linear(n_hidden//8, 1)
        # Dropout layer
        self.drop = torch.nn.Dropout(0.4)
        # Binary classification task -> Binary cross-entropy loss function
        self.loss_fn = torch.nn.BCELoss()

    def forward(self, x: torch.Tensor, y: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
        """Run the MLP; returns {'pred': probabilities} and, when labels
        are provided, also {'loss': BCE loss}."""
        # Network structure: Linear -> ReLU -> Dropout, repeated
        out = self.lin1(x)
        out = torch.relu(out)
        out = self.drop(out)
        out = self.lin2(out)
        out = torch.relu(out)
        out = self.drop(out)
        out = self.lin3(out)
        out = torch.relu(out)
        out = self.drop(out)
        out = self.lin4(out)
        out = torch.relu(out)
        out = self.drop(out)
        out = self.lin5(out).squeeze(1)
        # Binary classification task -> Sigmoid activation function
        out = torch.sigmoid(out)
        result = {'pred': out}
        # If we have labels, loss computation
        if y is not None:
            loss = self.loss(out, y)
            result['loss'] = loss
        return result

    def loss(self, pred, y):
        # BCE between predicted probabilities and 0/1 labels
        return self.loss_fn(pred, y)

    def predict(self, sentence_pairs: List[Dict]) -> List[str]:
        """Vectorize the raw sentence pairs and return 'True'/'False'
        predictions, preserving the input order."""
        data_store = []
        for line in sentence_pairs:
            data_store.append(vectorize_line(line))
        validation_dataloader = DataLoader(data_store, batch_size=32)
        res = []
        for x, _ in validation_dataloader:
            batch_out = self(x)
            pred = torch.round(batch_out['pred'])
            for elem in pred:
                # NOTE(review): `pred` is already rounded to 0/1, so the
                # 0.5 threshold here is redundant (but harmless)
                if elem>0.5:
                    res.append('True')
                else:
                    res.append('False')
        return res
| lello5/university-projects | Master degree/Natural Language Processing/HW1/hw1/stud/implementation.py | implementation.py | py | 5,750 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "torch.manual_seed",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_numb... |
import requests
import csv

# Wikimedia Pageviews REST API: daily views of the Japanese Wikipedia
# article "鬼滅の刃" (URL-encoded in the path) for June 2020.
url = "https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/ja.wikipedia.org/all-access/all-agents/%E9%AC%BC%E6%BB%85%E3%81%AE%E5%88%83/daily/20200601/20200630"
# the Wikimedia API asks clients to identify themselves via User-Agent
headers = {"User-Agent": "smatsuda@x-hack.jp"}

r = requests.get(url, headers=headers)
# fail loudly instead of writing an error payload to the CSV
r.raise_for_status()

# bug fix: the file was opened but never closed; a context manager
# guarantees flush/close, and newline='' is the documented way to open
# files for csv.writer (avoids blank lines on Windows)
with open('data_file.csv', 'w', newline='') as data_file:
    csv_writer = csv.writer(data_file)
    for item in r.json()["items"]:
        csv_writer.writerow([item["article"], item["views"]])
{
"api_name": "requests.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 10,
"usage_type": "call"
}
] |
36044561026 | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 19 09:02:24 2020
@author: user
"""
import cv2
import numpy
# Capture webcam frames, detect the largest face per frame, and collect
# every 10th detected face crop (resized to 100x100) into a numpy array.
cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
facedata = []
detections = 0  # frames in which a face was detected so far
while True:
    ret, frame = cap.read()
    if ret == False:
        continue  # camera glitch: skip this frame
    cv2.imshow("frame", frame)
    faces = face_cascade.detectMultiScale(frame, 1.3, 5)
    # Sort by area so the last entry is the largest face.
    faces = sorted(faces, key=lambda f: f[2] * f[3])
    facesec = None
    # Pick the largest face (i.e. last face).
    for face in faces[-1:]:
        x, y, w, h = face
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
        # Extract (crop out) the region of interest with a small margin.
        # NOTE(review): y-offset / x-offset can go negative for faces near
        # the frame edge, which wraps the slice — confirm border behavior.
        offset = 10
        facesec = frame[y - offset:y + h + offset, x - offset:x + w + offset]
        facesec = cv2.resize(facesec, (100, 100))
        # BUG FIX: the original reset `skip = 1` on every frame, so
        # `skip % 10 == 0` was never true and no face was ever stored.
        detections += 1
        if detections % 10 == 0:
            facedata.append(facesec)
            print(len(facedata))
    cv2.imshow('frame', frame)
    if facesec is not None:
        # BUG FIX: the original passed the int placeholder 0 to imshow when
        # no face had been detected, which raises a cv2 error.
        cv2.imshow('face section', facesec)
    keypressed = cv2.waitKey(1) & 0xFF
    if keypressed == ord('q'):
        break
# Convert our face list into a numpy array, one flattened row per face.
facedata = numpy.asarray(facedata)
facedata = facedata.reshape((facedata.shape[0], -1))
print(facedata.shape)
cap.release()
cv2.destroyAllWindows()
{
"api_name": "cv2.VideoCapture",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
... |
40478697751 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn.functional as F
from layers.utils import loss_tools
from layers.box_annotation_layer import annotate_proposals
def rcnn_loss_layer(score, delta, label_list, fg_delta_list, index_list):
# score (p, c)
# delta (p, c, 4)
# label_list list(n, l) sum(l) == k
# fg_delta_list list(n, l1, 4)
# index_list List[Tuple[Tensor, Tensor]]
# return loss()
p, num_class = score.shape[:2]
reg_loss, cls_loss = torch.tensor(0.), torch.tensor(0.)
n = len(index_list)
# labels = torch.cat(label_list)
for i in range(n):
label = label_list[i]
fg_i, bg_i = index_list[i]
index = torch.cat((fg_i, bg_i))
# (k,)
cls_loss += F.cross_entropy(score[index], label)
# (l1, 4)
fg_delta_i = fg_delta_list[i]
for j in range(1, num_class + 1):
index_j = torch.where(label == j)
x = fg_i[index_j]
if x.shape[0] == 0:
continue
pred_delta_j = delta[x, j]
gt_delta_j = fg_delta_i[index_j]
if pred_delta_j.shape[0] > 0:
reg_loss += loss_tools.smooth_l1_loss(pred_delta_j, gt_delta_j)
return reg_loss + cls_loss
def compute(model, score, bbox, gt_label, gt_bbox):
    """Annotate the model's current RoIs against ground truth, then
    evaluate the RCNN classification/regression loss on them."""
    labels, fg_deltas, indices = annotate_proposals(model.rois_list, gt_bbox, gt_label)
    return rcnn_loss_layer(score, bbox, labels, fg_deltas, indices)
if __name__ == '__main__':
    # Smoke test: wire a ResNet-18 backbone into the RCNN and time the loss.
    import time
    from rcnn import RCNN
    from resnet import resnet18
    from torch import nn
    resnet = resnet18()
    # Layers up to layer3 produce the shared feature map (stride 16).
    feature_extractor = nn.Sequential(
        resnet.conv1,
        resnet.bn1,
        resnet.relu,
        resnet.maxpool,
        resnet.layer1,
        resnet.layer2,
        resnet.layer3
    )
    # noinspection PyUnresolvedReferences
    # layer4 + global pool + flatten turn each pooled RoI into a vector.
    conv_to_head = nn.Sequential(
        resnet.layer4,
        resnet.avgpool,
        nn.Flatten(),
    )
    config = {
        'feature_stride': 16,
        'feature_compress': 1 / 16,
        'num_feature_channel': 256,
        'num_fc7_channel': 512,
        'num_rpn_channel': 512,
        'num_anchor': 9,
        'score_top_n': 100,
        'nms_top_n': 50,
        'nms_thresh': 0.7,
        'pool_out_size': 8,
        'num_class': 5,
        'radios': (0.5, 1, 2),
        'scales': (8, 16, 32),
    }
    model_ = RCNN(feature_extractor, conv_to_head, config)
    n_ = 10  # batch size for the random input images
    image = torch.rand((n_, 3, 128, 128))
    model_.total_mode()
    # score_(k, num_cls) delta_(k, num_cls, 4)
    score_, delta_ = model_.forward(image)
    print(score_.shape, delta_.shape)
    # Random ground truth: 8 boxes per image, coordinates sorted so that
    # each box is well-formed, plus random foreground labels.
    gt_bbox_ = torch.randint(0, 8 * 16, (n_, 8, 4)).sort(dim=2)[0].float()
    gt_label_ = torch.randint(1, config['num_class'], (n_, 8))
    label_list_, fg_delta_list_, index_list_ = annotate_proposals(model_.rois_list, gt_bbox_, gt_label_)
    print(label_list_[0].shape, fg_delta_list_[0].shape, index_list_[0][0].shape, index_list_[0][1].shape)
    # Time a single loss evaluation.
    t = time.time()
    loss_ = rcnn_loss_layer(score_, delta_, label_list_, fg_delta_list_, index_list_)
    print(time.time() - t)
    print(loss_)
| OYMiss/faster-rcnn | layers/rcnn_loss_layer.py | rcnn_loss_layer.py | py | 3,285 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.tensor",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn.fun... |
31445849910 | import ast
import json
import re
from pathlib import Path
import numpy as np
import pandas as pd
from tqdm import tqdm
from src.constants import DATA_PATH # noqa: I900
def get_hashtags(caption):
    """Return every '#tag' token (lowercase letters, digits, underscore)
    found in *caption*; non-string input yields an empty list."""
    if not isinstance(caption, str):
        return []
    return re.findall("#[a-z0-9_]+", caption)
def get_mentions(caption):
    """Return '@name' mentions from *caption*. A mention counts only when
    it starts the string or follows a non-word character (so e-mail-like
    'a@b' is skipped); non-string input yields an empty list."""
    if not isinstance(caption, str):
        return []
    matches = re.findall(r"(^|[^\w])@([\w\_\.]+)", caption)
    return [f"@{handle}" for _, handle in matches]
def get_interesting_keys(metadata: dict) -> dict:
    """
    Filter metadata to get only important keys.

    Args:
        metadata: Dictionary with metadata.

    Returns:
        Dictionary with like count, hashtags, mentions, timestamp and
        shortcode; missing scalar fields default to np.nan.
    """
    hashtags, mentions = [], []
    caption_edges = metadata.get("edge_media_to_caption", {}).get("edges", [])
    if caption_edges:
        caption = caption_edges[0]["node"]["text"]
        hashtags = get_hashtags(caption)
        mentions = get_mentions(caption)
    return {
        "#likes": metadata.get("edge_media_preview_like", {}).get("count", np.nan),
        "hashtags": hashtags,
        "mentions": mentions,
        "timestamp": metadata.get("taken_at_timestamp", np.nan),
        "shortcode": metadata.get("shortcode", np.nan),
    }
# Join the post->file mapping with the influencer categories, then emit one
# CSV of usable posts per category under DATA_PATH/MetadataCategories.
mapping_file = DATA_PATH / "JSON-Image_files_mapping.txt"
influencers_file = DATA_PATH / "influencers.csv"
mapping_df = pd.read_csv(
    mapping_file,
    sep="\t",
    header=0,
    names=["influencer_name", "JSON_PostMetadata_file_name", "Image_file_name"],
)
influencers_df = pd.read_csv(influencers_file)
# Attach each influencer's category (left join keeps unmatched rows).
mapping_df = mapping_df.join(
    influencers_df.set_index("username"),
    on="influencer_name",
    how="left",
)
for category, category_df in mapping_df.groupby("category"):
    exctracted_records = []
    for row in tqdm(category_df.itertuples(), total=len(category_df), desc=category):
        # metadata
        metadata_filename = row.JSON_PostMetadata_file_name
        meta_path = (
            DATA_PATH
            / "Metadata"
            / "info"
            / f"{row.influencer_name}-{metadata_filename}"
        )
        if not meta_path.exists():
            continue
        try:
            with meta_path.open() as json_file:
                metadata = json.load(json_file)
        except json.decoder.JSONDecodeError:
            # Skip corrupted metadata files rather than aborting the run.
            continue
        # image: the mapping column stores a python-list literal of filenames.
        images_filenames = ast.literal_eval(row.Image_file_name)
        images_count = len(images_filenames)
        image_filename = f"{row.influencer_name}-{images_filenames[0]}"
        # record
        record = {
            "username": row.influencer_name,
            "image_filename": image_filename,
            **get_interesting_keys(metadata),
        }
        image_path = Path(DATA_PATH / "Images" / "image" / record["image_filename"])
        # Keep only single-image posts that have a like count and whose
        # image file is actually present on disk.
        no_likes = np.isnan(record["#likes"])
        multiple_images = images_count > 1
        image_not_exists = not image_path.exists()
        if any([no_likes, multiple_images, image_not_exists]):
            continue
        else:
            exctracted_records.append(record)
    df = pd.DataFrame(exctracted_records)
    df.to_csv(DATA_PATH / "MetadataCategories" / f"{category}.csv", index=False)
| Stardust87/VIP | scripts/extract_metadata.py | extract_metadata.py | py | 3,268 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.findall",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 5... |
14619951113 | import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
from dash.dependencies import Input, Output
from importlib import reload
from mysql.connector.errors import OperationalError
from app.app import app
from app.datasources import laudos
from app.apps.layout import menu, style
# Page layout: shared navigation menu, a heading, three dropdown filters
# (query, years, clinic unit) and the resulting bar chart.
layout = html.Div([
    menu,
    html.H3('Consultas na base Laudo - Quantidade por ano de um fator'),
    # Which pre-defined SQL query to run (see laudos.lista_sql).
    dcc.Dropdown(
        id='query',
        options=laudos.queries[1],
        value=0,
    ),
    # One or more years plotted side by side.
    dcc.Dropdown(
        id='years',
        options=laudos.years,
        value=['2018'],
        multi=True
    ),
    # Single unit substituted into the SQL's %unidade% placeholder.
    dcc.Dropdown(
        id='unidades',
        options=laudos.unidades,
        value=1,
        multi=False
    ),
    dcc.Graph(id='years-graph'),
], style=style
)
@app.callback(Output('years-graph', 'figure'),
              [Input('years', 'value'), Input('query', 'value'), Input('unidades', 'value')])
def update_my_graph(selected_dropdown_value, query_value, unidades_value):
    """Return the bar-chart figure for the chosen years, query and unit.

    Args:
        selected_dropdown_value: list of year strings to plot.
        query_value: index into laudos.lista_sql['sql'].
        unidades_value: unit id substituted for the %unidade% placeholder.
    """
    sql = laudos.lista_sql['sql'][query_value]
    sql = sql.replace('%unidade%', str(unidades_value))
    try:
        df = pd.read_sql(sql, con=laudos.db)
    except OperationalError:
        # The DB connection can go stale; reload the datasource and retry once.
        reload(laudos)
        df = pd.read_sql(sql, con=laudos.db)
    # Renamed from `layout` to avoid shadowing the module-level page layout.
    fig_layout = go.Layout(xaxis=dict(type='category', title=df.columns[1]),
                           yaxis=dict(title='Número de pedidos'),
                           margin={'l': 100, 'r': 50, 't': 50, 'b': 150})
    # CLEANUP: the original assigned `data = []` twice; the first (dead)
    # assignment is removed.
    data = []
    for year in selected_dropdown_value:
        df_filtered = df[df['Ano_Solicitacao'] == int(year)]
        data.append(go.Bar({
            'x': df_filtered[df.columns[1]],
            'y': df_filtered.qtde,
            'name': year
        }))
    return {
        'data': data,
        'layout': fig_layout
    }
# Attach the standard Dash example stylesheet to the app.
app.css.append_css(
    {'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css'})
| IvanBrasilico/laudos_dash | app/apps/app2.py | app2.py | py | 1,945 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dash_html_components.Div",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "app.apps.layout.menu",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "dash_html_components.H3",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": ... |
13210926327 | import numpy as np
from math import sqrt
from matplotlib.pyplot import *
from matplotlib import animation
# Wavenumbers of the two superposed components and their angular
# frequencies from the dispersion relation w = sqrt(k + 1).
k1 = 0.6
k2 = 0.7
w1 = sqrt(k1 + 1)
w2 = sqrt(k2 + 1)
def wave(x, t):
    """Superposition of two traveling sine waves evaluated on grid *x*
    at time *t* (uses module-level k1/k2 and w1/w2)."""
    first = np.sin(k1 * x - w1 * t)
    second = np.sin(k2 * x - w2 * t)
    return first + second
# Simulation length and sampling.
T = 20           # total simulated duration
dt = 1/60.       # timestep: one frame per 1/60 of a time unit
t = 0
nt = int(T/dt)   # number of animation frames
nx = 1001        # spatial sample count (note: linspace below hardcodes 1001)
x = np.linspace(0,100,1001)
# Precompute every frame so playback only has to swap line data.
all_waves = []
while t < T:
    all_waves.append(wave(x,t))
    t += dt
# Set up the figure with the first precomputed frame drawn.
fig = figure()
line , = plot(x,all_waves[0])
draw()
FPS = 60
# BUG FIX: FuncAnimation's `interval` argument is in *milliseconds*, so the
# original 1./FPS (~0.0167) requested a frame every 0.017 ms — i.e. "as fast
# as possible" — instead of ~16.7 ms; scale by 1000 for a real 60 FPS.
inter = 1000. / FPS
def init():
    """Blit init callback: start the animation with an empty line."""
    line.set_data([],[])
    return line ,
def get_frame(frame):
    """Blit update callback: show the precomputed wave for frame index."""
    line.set_data(x,all_waves[frame])
    return line ,
# Build and display the animation, one precomputed frame per timestep.
# NOTE(review): FuncAnimation interprets `interval` in milliseconds —
# verify that `inter` is scaled accordingly.
anim = animation.FuncAnimation(fig,get_frame,init_func=init,frames=nt,interval=inter,blit=True)
show()
| simehaa/University | fys2140/oblig3_a.py | oblig3_a.py | py | 698 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "math.sqrt",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 19,
... |
27652958158 | from sample_page import sample
from bs4 import BeautifulSoup
import urllib.request
import time
import os
import sys
from html_writer import TEMPLATE, ROW_TEMPLATE
# Resolve the project directory layout relative to this script:
#   PROJECT_ROOT = parent of the script's directory
#   BIN_ROOT     = <PROJECT_ROOT>/bin
#   HTML_ROOT    = <BIN_ROOT>/html
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
print(SCRIPT_DIR)
PROJECT_ROOT = os.path.normpath(SCRIPT_DIR + os.sep + os.pardir)
print(PROJECT_ROOT)
# BUG FIX: the original chained assignment (BIN_ROOT = PROJECT_ROOT = ...)
# silently overwrote PROJECT_ROOT with the bin/ path right after computing it.
BIN_ROOT = os.path.normpath(SCRIPT_DIR + os.sep + os.pardir + os.sep + "bin")
print(BIN_ROOT)
HTML_ROOT = os.path.join(BIN_ROOT, "html")
print(HTML_ROOT)
# Parse the HTML markup
soup = BeautifulSoup(sample, "lxml")
# print(soup.find("a"))
"""
for product in soup.find_all('li'): #li = List
for img in product.find_all('img'):
print("Name: {}".format(img.get("alt")))
for product in soup.find_all('span', {"class": "bold"}):
print(product.getText().strip())
"""
all_products = soup.find_all('li')
number_of_products = len(all_products)
time.sleep(1)
# Collect [name, price, image_url, product_url] for every listing that has
# both a name and a non-empty price.
master_list = []
product_number = 0
for product in all_products:  # li = List
    product_number += 1
    # Get name (alt text of the product image, if present).
    name = product.find('img')
    if name:
        name = name.get("alt").strip()
    # Get price element; only read its text when a name was found too.
    price = product.find('span', {"class": "bold"})
    if name and price:
        price = price.getText().strip()
    # Get Product Link.
    # NOTE: after the line above, `price` is the stripped text — an empty
    # price string makes every following `if name and price` fail.
    if name and price:
        product_link = product.find("a")
        prod_url = product_link.get('href')
    # Get Image Link.
    if name and price:
        picture_link = product.find("img")
        pic_url = picture_link.get('src')
    # Download image (disabled)
    #if name and price:
    #    print("Downloading picture for {}..".format(product_number))
    #    pic_name = "pic{}.jpg".format(product_number)
    #    pic_dir = os.path.join(BIN_ROOT, "pics", pic_name)
    #    urllib.request.urlretrieve(pic_url, pic_dir)
    #    print('\n')
    if name and price:
        master_list.append([name, price, pic_url, prod_url])
# Render one HTML table row per scraped product.
html_rows = ""
for p in master_list:
    html_rows += ROW_TEMPLATE.format(p[0], "Ebay", p[1], p[2], p[3])
# BUG FIX: str.replace returns a new string; the original call discarded its
# result, so the newlines were never actually removed.
html_rows = html_rows.replace("\n", "")
print(html_rows)
#print(TEMPLATE.format(html_rows))
#output_html = HTML_ROOT + os.sep + "output.html"
#with open(output_html, "w") as writer:
#    writer.write(TEMPLATE.format(html_rows))
| Mailman366/GIT_Repository | Projects/Price_Scraper/python/scratch.py | scratch.py | py | 2,445 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.normpath",
"li... |
32038833506 | from operator import add
from typing import List, Tuple
FILE_NAME = 'input8.in'  # NOTE(review): unused — the open() below hardcodes 'input9.in'
# Knot positions: index 0 is the head; knot 1 is the part-1 tail and
# knot 9 the part-2 tail.
rope = [[0, 0] for _ in range(11)] #0 -> head, 1 -> part1, 9 -> part2
# Unit moves for each input direction letter (row, column deltas).
dirs = {"R": [0, 1], "L": [0, -1], "U": [-1, 0], "D": [1, 0]}
def sign_value(value: int) -> int:
    """Return -1, 0 or +1 matching the sign of *value*."""
    if value > 0:
        return 1
    if value < 0:
        return -1
    return 0
def get_diffs(head: List[int], tail: List[int]) -> Tuple[int, int]:
    """Return the (dx, dy) offsets from *tail* to *head*."""
    return head[0] - tail[0], head[1] - tail[1]
# Simulate the rope: for every input move, step the head one cell at a
# time and let each following knot chase its predecessor; record every
# cell visited by knot 1 (part 1) and knot 9 (part 2).
with open('input9.in') as file:
    visited_one = {(0, 0)}
    visited_nine = {(0, 0)}
    for line in file:
        diro, numo = line.strip().split()
        numo = int(numo)
        for _ in range(numo):
            # Move the head one unit in the given direction.
            rope[0] = list(map(add, rope[0], dirs[diro]))
            for i in range(1, len(rope)):
                diff_x, diff_y = get_diffs(rope[i-1], rope[i])
                diffs = map(lambda x: sign_value(x), [diff_x, diff_y])
                # A knot only moves when it is no longer adjacent to its
                # predecessor; it then steps one unit toward it per axis.
                if abs(diff_x) >= 2 or abs(diff_y) >= 2:
                    rope[i] = list(map(add, rope[i], diffs))
                if i == 1:
                    visited_one.add((rope[i][0], rope[i][1]))
                elif i == 9:
                    visited_nine.add((rope[i][0], rope[i][1]))
print(len(visited_one))
print(len(visited_nine))
| Jozkings/advent-of-code-2022 | 9.py | 9.py | py | 1,311 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "operator.add",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "operator.add",
"line_nu... |
9512999564 | # -*- coding: utf-8 -*-
# Coded By Kuduxaaa
import json, requests
from flask import request
from flask_restful import Resource
from app.service import PriceCalculator
predictor = PriceCalculator()
# This is example API Resource
class PricePrediction(Resource):
    """REST resource: GET predicts a car's market price from local data;
    POST looks up a live price on the myparts.ge API."""

    def get(self):
        """Predict average market price for model/brand/year query args.

        Responds with {'success': False, ...} when any required argument
        is missing or empty.
        """
        # BUG FIX: the original validation mixed `or`/`and` without
        # parentheses (`A or B and C and D ...`), so because `and` binds
        # tighter than `or` it did not reject every missing/empty argument.
        required = ('model', 'brand', 'year')
        if any(arg not in request.args or len(request.args[arg]) <= 0
               for arg in required):
            return {
                'success': False,
                'message': 'required data is missing'
            }
        model = request.args.get('model').lower()
        brand = request.args.get('brand').lower()
        year = request.args.get('year').lower()
        results = predictor.predict_price(year, brand, model)
        if results is not None:
            return {
                'success': True,
                'average': results,
                'message': f'ს�შუ�ლ� ფ�სი ბ�ზ�რზე �რის: ${str(results)} 💸'
            }
        else:
            return {
                'success': False,
                'message': '�მ მ�ნქ�ნის ფ�სები ვერვნ�ხეთ 🥺'
            }

    def post(self):
        """Fetch a live price from myparts.ge for form fields brand/year."""
        data = request.form
        if 'brand' not in data or len(data['brand']) <= 0 or 'year' not in data:
            return {
                'success': False,
                'message': 'Required data are missing'
            }
        brand = data['brand']
        year = data['year']
        url = f'https://api2.myparts.ge/api/ka/products/get?cat_id=1257&limit=15&man_id={brand}&page=1&pr_type_id=1&year_from={year}'
        response = requests.get(url, headers={
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; en-USD',
            'Accept': 'application/json'
        })
        if response.status_code == 200:
            jdata = json.loads(response.text)
            if 'data' in jdata:
                # Use the first (cheapest-listed) product's price.
                return {
                    'success': True,
                    'price': jdata['data']['products'][0]['price']
                }
        return {
            'success': False,
            'message': 'Price not found'
        }

    def delete(self):
        """Not implemented; reserved for future use."""
        pass
{
"api_name": "app.service.PriceCalculator",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask_restful.Resource",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_na... |
18621907063 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: targetstree_service.py
from requests import Session
import xml.etree.ElementTree as ET
import json
import multiprocessing
from joblib import Parallel, delayed
from ..tools import (xmltools, log)
from outpost24hiabclient.clients.hiabclient import HiabClient
from ..entities.targets_tree import TargetsTree, TargetGroupNode, TargetNode
class TargetService:
    """Facade over HiabClient for reading and mutating the target tree.

    The tree is built lazily on first use and cached on the instance
    (see refresh()); mutating operations keep the cached tree in sync
    with the changes they push to the appliance.
    """

    def __init__(self, hiabclient):
        self._logger = log.getLogger(__name__)
        self._hiabclient = hiabclient

    def get_targets_tree(self):
        """Return the cached TargetsTree, building it if necessary."""
        self.refresh()
        return self._targetgroups_tree

    def get_target_nodes(self):
        """Return every TargetNode in the tree."""
        self.refresh()
        return self._targetgroups_tree.get_all_target_nodes()

    def get_targetgroup_nodes(self):
        """Return every TargetGroupNode in the tree."""
        self.refresh()
        return self._targetgroups_tree.get_all_targetgroup_nodes()

    def create_targetgroup_node_from_fq_string(self, fq):
        """Resolve a fully-qualified, backslash-separated group path,
        creating any missing intermediate groups on the appliance.

        Returns the node for the final path component, or None for an
        empty/None path.
        """
        if fq is None:
            return None
        if fq == "":
            return None
        targetgroupnamecomponents = fq.split('\\')
        self.refresh()
        targetgroup_node = self._targetgroups_tree.get_root_node()
        # Component 0 is empty (the path starts with a backslash); walk
        # (current, child) name pairs down the tree.
        i = 1
        while i < len(targetgroupnamecomponents) - 1:
            root_targetgroup_name = targetgroupnamecomponents[i]
            if targetgroup_node.get_targetgroup().name == root_targetgroup_name:
                child_targetgroup_name = targetgroupnamecomponents[i + 1]
                child = targetgroup_node.get_child_with_name(child_targetgroup_name)
                if child is None:
                    # Child does not exist yet: create it in op24 and
                    # mirror it in the cached tree.
                    parent_targetgroup = targetgroup_node.get_targetgroup()
                    new_targetgroup = self._hiabclient.create_targetgroup(child_targetgroup_name, parent_targetgroup)
                    child = TargetGroupNode(new_targetgroup, targetgroup_node)
                    targetgroup_node.add_child(child)
                targetgroup_node = child
            i = i + 1
        return targetgroup_node

    def create_target_node(self, targetaddress, targetgroup_name, scanner_name, dnslookup):
        """Create one target in the named group and return its new node."""
        self.refresh()
        scanner = self.get_scanner_by_name(scanner_name)
        targetgroup_node = self._targetgroups_tree.get_targetgroup_node_from_fq_string(targetgroup_name)
        targetgroup = targetgroup_node.get_targetgroup()
        target = self._hiabclient.create_targets([targetaddress], targetgroup, dnslookup, scanner)[0]
        tn = TargetNode(targetgroup_node, target)
        targetgroup_node.add_target_node(tn)
        return tn

    def move_target_node(self, target_node, dst_targetgroup_name):
        """Move a target to another group; returns True on success."""
        src_targetgroup_node = target_node.get_parent_targetgroup_node()
        dst_targetgroup_node = self._targetgroups_tree.get_targetgroup_node_from_fq_string(dst_targetgroup_name)
        response = self._hiabclient.move_target(target_node.get_target(), src_targetgroup_node.get_targetgroup(), dst_targetgroup_node.get_targetgroup(), dst_targetgroup_node.get_parent().get_targetgroup())
        if response:
            # Keep the cached tree consistent with the appliance.
            target_node.set_parent_targetgroup_node(dst_targetgroup_node)
            src_targetgroup_node.remove_target_node(target_node)
            dst_targetgroup_node.add_target_node(target_node)
            return True
        return False

    def update_target_node(self, target_node, virtualhosts="", scanner_name=None, custom0="", custom1="", custom2="", custom3="", custom4="", custom5="", custom6="", custom7="", custom8="", custom9=""):
        """Update a target's scanner, virtual hosts and custom fields.

        Returns the (refreshed) target node.
        """
        scanner_id = target_node.get_target().scannerid
        if scanner_name is not None and scanner_name != "":
            # BUG FIX: the original called self._get_scanner_by_name, which
            # does not exist (the method is get_scanner_by_name), so any
            # call supplying a scanner name raised AttributeError.
            scanner = self.get_scanner_by_name(scanner_name)
            scanner_id = scanner.xid
        target = self._hiabclient.update_target(target=target_node.get_target(), scannerid=scanner_id, virtualhosts=virtualhosts, custom0=custom0, custom1=custom1, custom2=custom2, custom3=custom3, custom4=custom4, custom5=custom5, custom6=custom6, custom7=custom7, custom8=custom8, custom9=custom9)
        target_node.set_target(target)
        return target_node

    def remove_target_node(self, target_node):
        """Delete a target on the appliance; returns True on success."""
        response = self._hiabclient.delete_targets([target_node.get_target()])
        if response:
            targetgroup_node = target_node.get_parent_targetgroup_node()
            targetgroup_node.remove_target_node(target_node)
            return True
        return False

    def refresh(self):
        """Build and cache the tree on first call; later calls are no-ops."""
        if not hasattr(self, '_targetgroups_tree'):
            treebuilder = TargetsTreeBuilder(self._hiabclient)
            self._targetgroups_tree = treebuilder.build_tree()

    def get_scanner_by_name(self, scanner_name):
        """Return the scanner with the given name, or None."""
        for s in self._hiabclient.get_scanners():
            if s.name == scanner_name:
                return s
        return None

    def get_scanner_by_id(self, scanner_id):
        """Return the scanner with the given id, or None."""
        for s in self._hiabclient.get_scanners():
            if s.xid == scanner_id:
                return s
        return None
class TargetsTreeBuilder:
    """Builds a TargetsTree (groups linked into a tree, plus their targets)
    from the HIAB API."""

    def __init__(self, op24lib):
        self._hiabclient = op24lib
        self._logger = log.getLogger(__name__)
        # List of (targetgroup, [targets]) pairs filled by build_tree().
        self._targetgroups_targets = []

    def build_tree(self):
        """Fetch all groups and their targets, link the groups into a tree,
        attach the targets, and return the finished tree."""
        targetgroups = self._hiabclient.get_targetgroups()
        self._logger.info("Targetgroups fetched")
        #num_cores = multiprocessing.cpu_count()
        for t in targetgroups:
            self._targetgroups_targets.append(self._obtain_targets(t))
        #self._targetgroups_targets = Parallel(n_jobs=num_cores)(delayed(self._obtain_targets)(i) for i in targetgroups)
        self._logger.info("All targetgroups with targets fetched")
        tree = self._build_targetgroup_tree(targetgroups)
        tree.set_depth()
        self._logger.info("Tree has been built")
        self._add_targets_to_tree(tree)
        self._logger.info("Targets to tree added")
        return tree

    def _obtain_targets(self, targetgroup):
        # Pair a group with the list of its targets from the API.
        tgs = self._hiabclient.get_targets(targetgroup)
        return (targetgroup, tgs)

    def _build_targetgroup_tree(self, targetgroups):
        # Link every group node to its parent via xiparentid; the single
        # parentless node becomes the tree root.
        lookup = self._make_targetgroup_nodes(targetgroups)
        root = []
        for tgn in lookup:
            parent_tgn = self._lookup_parent(lookup, tgn)
            if(parent_tgn is not None):
                tgn.set_parent(parent_tgn)
                parent_tgn.add_child(tgn)
            else:
                root.append(tgn)
        tree = TargetsTree(root[0])
        return tree

    def _lookup_parent(self, proposed_parents, tgn):
        # Return the node whose group id matches tgn's parent id
        # (implicitly None when no parent is found).
        for p in proposed_parents:
            potential_parent_targetgroup = p.get_targetgroup()
            targetgroup = tgn.get_targetgroup()
            if(targetgroup.xiparentid is not None):
                if(targetgroup.xiparentid == potential_parent_targetgroup.xid):
                    return p

    def _make_targetgroup_nodes(self, targetgroups):
        # Wrap every targetgroup in an (initially unlinked) node.
        result = []
        for t in targetgroups:
            tgn = TargetGroupNode(t)
            result.append(tgn)
        return result

    def _add_targets_to_tree(self, tree):
        # Attach targets layer by layer, deepest groups first, so a target
        # already attached deeper is not duplicated in an ancestor group.
        leave_nodes = tree.get_leave_nodes()
        leave_nodes = sorted(leave_nodes, key=lambda n: n.get_depth(), reverse=True)
        deepest_depth = leave_nodes[0].get_depth()
        layers = reversed(range(0, deepest_depth))
        for layer in layers:
            if(layer != 0):
                targetgroups = tree.get_all_targetgroup_nodes_of_depth(layer)
                for tg in targetgroups:
                    self._add_targets_to_targetgroup_node(tg)
        #self._logger.info("Leave nodes fetched")
        #self._add_targets_to_targetgroup_nodes(leave_nodes)

    def _add_targets_to_targetgroup_nodes(self, nodes):
        # Alternative bottom-up traversal; currently unused (see the
        # commented-out call in _add_targets_to_tree).
        for n in nodes:
            self._add_targets_to_targetgroup_node(n)
        parents=[]
        for p in nodes:
            parent_node = p.get_parent()
            if(parent_node is not None and parent_node not in parents):
                parents.append(parent_node)
        if(len(parents) > 0):
            self._add_targets_to_targetgroup_nodes(parents)
        self._logger.info("All nodes have been processed.")

    def _add_targets_to_targetgroup_node(self, tgn):
        # Attach each of the group's targets to tgn unless it is already
        # attached elsewhere; targets whose only containers are dynamic
        # (rule/report based) groups are attached here anyway.
        self._logger.info("Processing targetgroup node: " + tgn.get_targetgroup().name)
        result = []
        targets = self._get_targets_in_targetgroup(tgn.get_targetgroup())
        for t in targets:
            containing_targetnodes = tgn.get_containing_targetnodes_of_target(t)
            if(len(containing_targetnodes) == 0):
                tn = TargetNode(tgn, t)
                tgn.add_target_node(tn)
                result.append(tn)
            else:
                containing_targetgroups_are_all_dynamic = True
                for ctn in containing_targetnodes:
                    if(not(self._is_dynamic_targetgroup(ctn.get_parent_targetgroup_node().get_targetgroup()))):
                        containing_targetgroups_are_all_dynamic = False
                if(containing_targetgroups_are_all_dynamic):
                    tn = TargetNode(tgn, t)
                    tgn.add_target_node(tn)
                    result.append(tn)
        self._logger.info("Finished processing node: " + tgn.get_targetgroup().name)
        return result

    def _is_dynamic_targetgroup(self, targetgroup):
        # Rule-based and report-based groups are considered "dynamic".
        if(targetgroup.rulebased or targetgroup.reportbased):
            return True
        return False

    def _get_targets_in_targetgroup(self, targetgroup):
        # Look up the target list cached for this group in build_tree().
        # NOTE(review): the loop variable shadows the builtin `tuple`;
        # harmless here but worth renaming.
        for tuple in self._targetgroups_targets:
            if(targetgroup.xid == tuple[0].xid):
                return tuple[1]

    def _add_targets_to_nodes(self, targetgroup_node):
        # NOTE(review): this method appears unused, and the bare
        # `targetgroup_node.get` expression below looks truncated or
        # dead — confirm against version control before relying on it.
        targets = self._get_targets_in_targetgroup(targetgroup_node.get_targetgroup())
        for t in targets:
            targetgroup_node.get
            if(not(targetgroup_node.contains_target(t))):
                tn = TargetNode(targetgroup_node, t)
                targetgroup_node.add_target_node(tn)
| schubergphilis/outpost24hiabclient | outpost24hiabclient/services/target_service.py | target_service.py | py | 10,384 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "tools.log.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tools.log",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "entities.targets_tree.TargetGroupNode",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "e... |
35847125868 | import ast
import asyncio
import time
import traceback
import rank_calc
from data_base import DataBase
from datetime import datetime
class LeaderboardsCollector:
    """Periodically scrapes VimeWorld leaderboards, records per-user rank
    movement as "actions", maintains daily activity tops, and recomputes
    each user's per-game rank.

    NOTE(review): several `time.sleep` calls below run inside `async def`
    coroutines and block their event loop; they are kept as-is because each
    coroutine is started on its own thread via run_async_thread.
    """

    def __init__(self, main_collector) -> None:
        self.main_collector = main_collector
        self.vime = main_collector.vime_archive.vime  # VimeWorld API client
        self.status = 'Starting'         # state of the leaderboard-diff loop
        self.status_update = 'Starting'  # state of the user-LB update loop
        self.main_collector.vime_archive.run_async_thread(self.check_leaderboards())
        self.main_collector.vime_archive.run_async_thread(self.update_user_lb_cycle())

    async def check_leaderboards(self):
        """Every ~30 min, diff each leaderboard's top-1000 against the
        previous snapshot and log every user's place change."""
        last_leaderboards = {}  # "<type>_<sort>" -> previous top-1000 user ids
        while True:
            try:
                self.status = 'Getting Leaderboards'
                leaderboard_list = self.vime.get_leaderboard_list()
                self.status = 'Processing Leaderboards'
                for leaderboard_data in leaderboard_list:
                    table_type = leaderboard_data['type']
                    table_sorts = leaderboard_data['sort']
                    if table_type == 'guild': continue  # guild boards are not per-user
                    for sort in table_sorts:
                        name = f'{table_type}_{sort}'
                        # Respect the API rate limit before each request.
                        while self.vime.limit_remaining <= 10:
                            time.sleep(1)
                        leaderboard = self.vime.get_leaderboard(lb_type=table_type, sort=sort, size=1000)
                        users_list = []
                        for player in leaderboard['records']:
                            # Some boards nest the user object, others inline it.
                            user_id = (player['user']['id']) if 'user' in player else (player['id'])
                            users_list.append(user_id)
                        last_users_list = (last_leaderboards[name]) if name in last_leaderboards else (None)
                        last_leaderboards[name] = users_list
                        if last_users_list is None:
                            continue  # first snapshot for this board: nothing to diff
                        # Users present now: positive diff means they climbed.
                        for user_id in users_list:
                            last_index = 1000  # default: was outside the top-1000
                            index = users_list.index(user_id)
                            try:
                                last_index = last_users_list.index(user_id)
                            except ValueError:  # narrowed from bare `except:`
                                pass
                            diff = last_index - index
                            if diff == 0: continue
                            actions = {'leaderboard': {'type': table_type, 'sort': sort, 'diff': diff, 'place': (index+1)}}
                            self.main_collector.db.insert_async('actions', f'{user_id}, {int(datetime.now().timestamp())}, "{actions}"')
                        # Users that dropped out of the top-1000 entirely.
                        for user_id in last_users_list:
                            if not user_id in users_list:
                                last_index = last_users_list.index(user_id)
                                index = 1000
                                diff = last_index - index
                                if diff == 0: continue
                                actions = {'leaderboard': {'type': table_type, 'sort': sort, 'diff': diff, 'place': '>1000'}}
                                self.main_collector.db.insert_async('actions', f'{user_id}, {int(datetime.now().timestamp())}, "{actions}"')
                self.status = 'Waiting'
                time.sleep(1800)
            except Exception:
                # Keep the loop alive on any scrape/API failure; retry shortly.
                self.status = 'Waiting'
                time.sleep(10)

    async def update_user_lb_cycle(self):
        """Every ~30 min, rebuild the daily xp/online/wins/games tops from
        the in-memory activity counters, persist them, and recompute ranks."""
        self.user_lb_data = {}  # user_id -> daily activity counters
        await asyncio.sleep(1)
        self.users_lb_db = DataBase('', file_name='user_lb.db')
        await self.load_user_lb_data(self.user_lb_data, self.users_lb_db)
        date = datetime.now()
        while True:
            try:
                self.status_update = 'Updating Leaderboards'
                xp_top = {}
                online_top = {}
                wins_top = {}
                games_top = {}
                # Reset the daily counters when the day rolls over.
                if date.day != datetime.now().day:
                    date = datetime.now()
                    self.user_lb_data = {}
                lb_data_copy = self.user_lb_data.copy()
                for user in lb_data_copy:
                    xp_top[user] = lb_data_copy[user]['xp']
                    online_top[user] = lb_data_copy[user]['online']
                    wins_top[user] = lb_data_copy[user]['wins']
                    games_top[user] = lb_data_copy[user]['games']
                # Order each top by value, best first.
                xp_top = dict(sorted(xp_top.items(), key=lambda item: item[1], reverse=True))
                online_top = dict(sorted(online_top.items(), key=lambda item: item[1], reverse=True))
                wins_top = dict(sorted(wins_top.items(), key=lambda item: item[1], reverse=True))
                games_top = dict(sorted(games_top.items(), key=lambda item: item[1], reverse=True))
                self.save_user_lb_data(self.users_lb_db, lb_data_copy, xp_top, online_top, wins_top, games_top)
                await self.calc_users_rank()
            except Exception:
                self.main_collector.exceptions.append(traceback.format_exc())
                self.status_update = 'Waiting'
                time.sleep(10)
            self.status_update = 'Waiting'
            time.sleep(1800)

    async def calc_users_rank(self):
        """Recompute every user's per-game rank from stored points and
        refresh the per-game top-500 tables."""
        self.status_update = 'Calculating ranks'
        users_points = await self.main_collector.vime_archive.users_db.get_async('points', 'id, points', type='all')
        # game -> {user_id: points}
        data = {}
        for user in users_points:
            user_id = user[0]
            points = ast.literal_eval(user[1])  # the points column stores a dict repr
            for game in points:
                if game not in data: data[game] = {}
                data[game][user_id] = points[game]
        # game -> user ids ordered by points, best first.
        data_players_list = {}
        for game in data:
            data[game] = dict(sorted(data[game].items(), key=lambda item: item[1], reverse=True))
            data_players_list[game] = list(data[game].keys())
        # Build one big "rank = CASE ... END WHERE id IN (...)" update.
        set_data_pattern = 'rank= CASE {0} END'
        case_pattern = 'WHEN id={0} THEN "{1}" '
        condition_pattern = 'id IN ({0})'
        condition_el_pattern = '{0},'
        i = 0
        self.status_update = f'Saving player #{i}'
        set_data = ''
        condition_data = ''
        for user in users_points:
            try:
                user_id = user[0]
                ranks = {}
                for game in data:
                    index = data_players_list[game].index(user_id)
                    rank = rank_calc.get_rank_by_index(index)
                    ranks[game] = rank
                set_data += case_pattern.format(user_id, ranks)
                condition_data += condition_el_pattern.format(user_id)
                self.status_update = f'Saving player #{i}'
                i += 1
            except ValueError:
                # User missing from some game's ordering: skip them entirely.
                pass
        # BUG FIX: each element appends exactly one trailing comma, so only
        # one character must be trimmed; the original [:-2] also chopped the
        # last digit of the final user id, producing invalid SQL.
        self.main_collector.vime_archive.users_db.update_async('points', set_data_pattern.format(set_data),
                                                               condition_pattern.format(condition_data[:-1]))
        for game in data:
            self.status_update = f'Saving {game}_top'
            # Create the per-game top table on first use.
            if len(await self.users_lb_db.request_command(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{game}_top';")) == 0:
                self.users_lb_db.execute_command(f'CREATE TABLE {game}_top (user_id INTEGER, stat INTEGER);')
            table = data_players_list[game]
            table = table[:500]  # keep only the top 500
            self.users_lb_db.delete_async(f'{game}_top')
            for user_id in table:
                self.users_lb_db.insert_async(f'{game}_top', f'{user_id}, {data[game][user_id]}')

    async def load_user_lb_data(self, user_lb_data, db):
        """Restore persisted daily counters so they survive restarts."""
        data = await db.get_async('daily_activity', '*', type='all')
        if data is None: return
        for d in data:
            user_lb_data[d[0]] = ast.literal_eval(d[1])

    def save_user_lb_data(self, db, user_lb_data, xp_top, online_top, wins_top, games_top):
        """Persist the daily counters and the four ranked top lists
        (each truncated to at most 1000 rows)."""
        self.status_update = 'Saving Leaderboards'
        db.delete_async('daily_activity')
        db.delete_async('xp_top')
        db.delete_async('online_top')
        db.delete_async('wins_top')
        db.delete_async('games_top')
        for n in range(1000):  # flattened: the original aliased `n = i`
            if len(xp_top) > n:
                db.insert_async('xp_top', f'{list(xp_top.keys())[n]}, {list(xp_top.values())[n]}')
            if len(online_top) > n:
                db.insert_async('online_top', f'{list(online_top.keys())[n]}, {list(online_top.values())[n]}')
            if len(wins_top) > n:
                db.insert_async('wins_top', f'{list(wins_top.keys())[n]}, {list(wins_top.values())[n]}')
            if len(games_top) > n:
                db.insert_async('games_top', f'{list(games_top.keys())[n]}, {list(games_top.values())[n]}')
        for user in user_lb_data:
            db.insert_async('daily_activity', f'{user}, "{user_lb_data[user]}"')

    def add_user_lb_stat(self, user_id: int, stat_name: str, count: int):
        """Accumulate a daily counter ('xp' | 'online' | 'wins' | 'games')."""
        if user_id not in self.user_lb_data:
            self.user_lb_data[user_id] = {'xp': 0, 'online': 0, 'wins': 0, 'games': 0}
        self.user_lb_data[user_id][stat_name] += count

    def get_status(self) -> str:
        """Human-readable combined status of both background loops."""
        return f'{self.status} | {self.status_update}'
{
"api_name": "time.sleep",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.... |
11737215383 | """new fields p in project model
Revision ID: dd4e694b3acf
Revises: 966b658403b2
Create Date: 2018-08-09 13:10:41.624416
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dd4e694b3acf'
down_revision = '966b658403b2'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this migration: add a nullable ``path`` column (VARCHAR(256))
    to the ``project`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('project', sa.Column('path', sa.String(length=256), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert this migration: drop the ``path`` column from ``project``.

    Any data stored in the column is lost on downgrade.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('project', 'path')
    # ### end Alembic commands ###
| johndoe-dev/Ecodroid | migrations/versions/dd4e694b3acf_new_fields_p_in_project_model.py | dd4e694b3acf_new_fields_p_in_project_model.py | py | 675 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String"... |
12194813975 | # Import necessary modules
import nextcord # Library for building Discord bots
from nextcord.ext import commands # Additional classes for command handling
from nextcord import SlashOption # Specific class for creating slash command options
import openai # OpenAI's Python library, used to interact with the GPT-3 API
import os # OS module for operating system-related operations
import aiohttp # Asynchronous HTTP client/server for asyncio
import asyncio # Library for writing single-threaded concurrent code
import io # Module provides capability to handle the IO operations
from modules.image_process import stitch_images, process_image # Custom functions for processing images
from modules.buttons import ImageButton, RegenerateButton, VaryButton, RegenerateVaryButton # Custom classes for interactive buttons
from config import MAX_PAINT_TASKS
# 'ImageView' class is a child class of 'nextcord.ui.View'. It's used to create the UI for the bot.
class ImageView(nextcord.ui.View):
    """Button UI attached to a posted image grid.

    Row 0 holds one ImageButton per image (I1..In) plus either a Regenerate
    button or a Regenerate+Vary button depending on `button_type`; row 1
    holds one VaryButton per image (V1..Vn).
    """
    def __init__(self, image_paths, size, prompt, cog, button_type):
        """image_paths: file path per generated image; size: resolution
        string (e.g. '512x512'); prompt: original text prompt (None for
        variation views); cog: the Paint cog the buttons call back into;
        button_type: 'regenerate' or 'regenerate_vary'."""
        super().__init__() # Call the constructor of the parent class
        # Add image buttons for each generated image
        for idx, image_path in enumerate(image_paths):
            image_button = ImageButton(label=f'I{idx+1}', image_path=image_path)
            image_button.row = 0
            self.add_item(image_button)
        # Add a regenerate button or a regenerate-vary button based on 'button_type'
        if button_type == 'regenerate':
            regenerate_button = RegenerateButton(size, prompt, cog)
            regenerate_button.row = 0
            self.add_item(regenerate_button)
        elif button_type == 'regenerate_vary':
            # Variation views regenerate from the first image of the grid.
            regenerate_vary_button = RegenerateVaryButton(size, image_paths[0], cog)
            regenerate_vary_button.row = 0
            self.add_item(regenerate_vary_button)
        # Add vary buttons for each generated image
        for idx, image_path in enumerate(image_paths):
            vary_button = VaryButton(label=f'V{idx+1}', image_path=image_path, size=size, cog=cog)
            vary_button.row = 1
            self.add_item(vary_button)
# 'Worker' class is used to manage bot tasks like creating images or variations
class Worker:
    """Runs blocking OpenAI image jobs off the event loop, limiting
    concurrency with a semaphore and retrying transient failures."""

    def __init__(self, cog):
        self.cog = cog
        # Cap the number of simultaneous OpenAI calls.
        self.semaphore = asyncio.Semaphore(MAX_PAINT_TASKS)

    async def _call_with_retry(self, func, **kwargs):
        """Run `func(**kwargs)` in a worker thread under the semaphore.

        Retries up to 3 attempts with a 1-second pause between them; the
        last failure is re-raised. (Shared by both public methods — the
        original duplicated this loop verbatim.)
        """
        async with self.semaphore:
            for attempt in range(3):
                try:
                    return await asyncio.to_thread(func, **kwargs)
                except Exception:
                    if attempt == 2:
                        raise
                    await asyncio.sleep(1)

    async def create_image(self, prompt, size, n):
        """Generate `n` images of `size` for the text `prompt`."""
        return await self._call_with_retry(
            openai.Image.create, prompt=prompt, n=n, size=size)

    async def create_image_variation(self, image, size, n):
        """Generate `n` variations of `image` (file object or bytes)."""
        return await self._call_with_retry(
            openai.Image.create_variation, image=image, n=n, size=size)
# 'Paint' class is a cog containing commands and listeners for the bot
class Paint(commands.Cog):
    """Cog providing AI image commands: /paint renders a text prompt via
    the OpenAI image API and /upload generates variations of the caller's
    most recently posted image."""

    def __init__(self, bot):
        self.bot = bot
        self.worker = Worker(self)  # throttles concurrent OpenAI jobs

    @commands.Cog.listener()  # Listener decorator to create event listeners
    async def on_ready(self):
        print("Paints mixed")  # Output when the bot is ready

    async def generate_image(self, interaction, user_prompt, size):
        """Generate four images for `user_prompt`, stitch them into one
        grid and post it with the interactive button view."""
        # If the interaction response is not completed, defer it
        if not interaction.response.is_done():
            await interaction.response.defer()
        # Create the necessary directories
        os.makedirs('ai_resources/new_images', exist_ok=True)
        # Create images and stitch them together
        response = await self.worker.create_image(user_prompt, size, 4)
        file_to_send, image_files = await asyncio.to_thread(stitch_images, response)
        # Send the image with an embed and the ImageView
        with open(file_to_send, 'rb') as f:
            picture = nextcord.File(f)
            embed = nextcord.Embed(title="Your Picassimo!", color=nextcord.Color.yellow(),
                                   description=f"**Prompt:** {user_prompt}\n\n**Size:** {size}")
            embed.set_thumbnail(url="https://gateway.ipfs.io/ipfs/QmeQZvBhbZ1umA4muDzUGfLNQfnJmmTVsW3uRGJSXxXWXK")
            # NOTE(review): attachment URLs normally reference the file's
            # basename; if stitch_images returns a path with directories
            # this URL will not match — confirm against stitch_images.
            embed.set_image(url=f"attachment://{file_to_send}")
            view = ImageView(image_files, size, user_prompt, self, 'regenerate')
            await interaction.followup.send(embed=embed, file=picture, view=view)
        # Remove the sent file
        await asyncio.to_thread(os.remove, file_to_send)

    async def vary_image(self, interaction, image_path, size):
        """Generate four variations of `image_path` (local file path or
        http(s) URL), stitch them and post the grid with buttons."""
        # Create the necessary directories
        os.makedirs('ai_resources/new_images', exist_ok=True)
        # Generate variations of the image and stitch them together
        if not (image_path.startswith('http://') or image_path.startswith('https://')):
            with open(image_path, 'rb') as image_file:
                response = await self.worker.create_image_variation(image_file, size, 4)
        else:
            async with aiohttp.ClientSession() as session:
                async with session.get(image_path) as resp:
                    if resp.status != 200:
                        return await interaction.followup.send('Could not download file...', ephemeral=True)
                    data = await resp.read()
                    byte_stream = io.BytesIO(data)
                    byte_array = byte_stream.getvalue()
                    response = await self.worker.create_image_variation(byte_array, size, 4)
        file_to_send, image_files = await asyncio.to_thread(stitch_images, response)
        # Send the image variations with an embed and the ImageView
        with open(file_to_send, 'rb') as f:
            picture = nextcord.File(f)
            embed = nextcord.Embed(title="Your Picassimo Variations!", color=nextcord.Color.yellow(),
                                   description=f"**Size:** {size}")
            embed.set_thumbnail(url="https://gateway.ipfs.io/ipfs/QmeQZvBhbZ1umA4muDzUGfLNQfnJmmTVsW3uRGJSXxXWXK")
            embed.set_image(url=f"attachment://{file_to_send}")
            view = ImageView(image_files, size, None, self, 'regenerate_vary')
            await interaction.followup.send(embed=embed, file=picture, view=view)
        # Remove the sent file
        await asyncio.to_thread(os.remove, file_to_send)

    # Slash command to generate an image from a text prompt
    @nextcord.slash_command(description="Generate an image from a text prompt")
    async def paint(self, interaction: nextcord.Interaction, prompt: str,
                    resolution: str = SlashOption(
                        choices={"256x256": "256x256", "512x512": "512x512", "1024x1024": "1024x1024"},
                        description="Choose the resolution for the image"
                    )):
        await self.generate_image(interaction, prompt, resolution)

    # Slash command to generate variations of a user-uploaded image
    @nextcord.slash_command(description="Upload an image and generate variations")
    async def upload(self, interaction: nextcord.Interaction,
                     resolution: str = SlashOption(
                         choices={"256x256": "256x256", "512x512": "512x512", "1024x1024": "1024x1024"},
                         description="Choose the output resolution"
                     )):
        try:
            await interaction.response.defer()
            # Search the channel history for an image uploaded by the user
            messages = await interaction.channel.history(limit=50).flatten()
            image_url = None
            for message in messages:
                if message.author == interaction.user and message.attachments:
                    image_url = message.attachments[0].url
                    break
            # Process the found image and generate variations
            if image_url:
                file_name = "ai_resources/new_images/uploaded_image.png"
                async with aiohttp.ClientSession() as session:
                    async with session.get(image_url) as resp:
                        if resp.status != 200:
                            # BUG FIX: the response was already deferred above,
                            # so replies must go through the followup webhook —
                            # interaction.response.send_message would raise.
                            return await interaction.followup.send("Could not download file...",
                                                                   ephemeral=True)
                        data = await resp.read()
                        with open(file_name, 'wb') as f:
                            f.write(data)
                processed_image_path = process_image(file_name)
                await self.vary_image(interaction, processed_image_path, resolution)
            else:
                # BUG FIX: same as above — already deferred, use followup.
                await interaction.followup.send("Please upload an image.", ephemeral=True)
        except Exception as e:
            # After a successful defer() the initial response is consumed;
            # fall back to send_message only when defer itself failed.
            if interaction.response.is_done():
                await interaction.followup.send(f"An error occurred: {e}", ephemeral=True)
            else:
                await interaction.response.send_message(f"An error occurred: {e}", ephemeral=True)
# Function to set up the bot
def setup(bot):
    """Extension entry point used by bot.load_extension(): registers the Paint cog."""
    bot.add_cog(Paint(bot)) # Add the Paint cog to the bot
| CryptoAutistic80/Nextcord-Cog-Bot | retired cogs/paint.py | paint.py | py | 9,970 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "nextcord.ui",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "modules.buttons.ImageButton",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "modules.buttons.RegenerateButton",
"line_number": 27,
"usage_type": "call"
},
{
"api... |
25009064577 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def initialize_weight(x):
    """Xavier-uniform-initialize a layer's weight in place and, when the
    layer has a bias, zero it."""
    nn.init.xavier_uniform_(x.weight)
    if x.bias is None:
        return
    nn.init.constant_(x.bias, 0)
class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention."""
    def __init__(self, d_model, num_head=8):
        super().__init__()
        self.num_head = num_head
        self.d = d_model // num_head  # per-head dimension
        self.scale = self.d ** -0.5   # 1/sqrt(d_head) score scaling
        if d_model % num_head != 0:
            print('!!!!Warning: d_model % num_head != 0!!!!')
        # Q/K/V projections without bias, Xavier-initialized.
        self.linear_q = nn.Linear(d_model, self.d * num_head, bias=False)
        self.linear_k = nn.Linear(d_model, self.d * num_head, bias=False)
        self.linear_v = nn.Linear(d_model, self.d * num_head, bias=False)
        initialize_weight(self.linear_q)
        initialize_weight(self.linear_k)
        initialize_weight(self.linear_v)
        self.output_layer = nn.Linear(self.d * num_head, d_model, bias=False)
        initialize_weight(self.output_layer)
    def sequence_mask(self, X, valid_len, value):
        """Overwrite, in place, each query's attention scores for key
        positions at or beyond its valid length with `value`.

        X: [batch_size, num_heads, num_query, num_key] score tensor.
        valid_len: [batch_size, num_query] per-query valid lengths.
        """
        maxlen = X.shape[3]
        # [1, 1, maxlen] >= [batch_size, num_query, 1] -> [batch_size, num_query, maxlen]
        mask = torch.arange((maxlen), dtype=torch.float32,
                            device=X.device)[None, None, :] >= valid_len[:, :, None]
        # shape of mask: [batch_size, num_heads, num_query, maxlen]
        mask = mask.unsqueeze(1).repeat(1, X.shape[1], 1, 1)
        X[mask] = value
        return X
    def masked_softmax(self, X, valid_len):
        """Softmax over the key axis with optional length masking.

        valid_len may be one length per batch sample (encoder use) or one
        per query (decoder self-attention during training); None disables
        masking entirely.
        """
        if valid_len is None:
            return nn.functional.softmax(X, dim=-1)
        else:
            # 1-D valid_len: one length per sample; 2-D: one per query.
            assert(valid_len.dim() in [1, 2])
            if valid_len.dim() == 1:
                valid_len = valid_len.reshape(-1, 1).repeat(1, X.shape[2])
            # Masked positions get -1e9 so softmax drives them to ~0.
            X = self.sequence_mask(X, valid_len, value=-1e9)
            return nn.functional.softmax(X, dim=-1)
    def forward(self, q, k, v, valid_len):
        """q/k/v: [batch_size, seq_len, d_model]; returns the same shape as q."""
        d = self.d
        batch_size = q.shape[0]
        assert(k.shape[1] == v.shape[1])
        if valid_len is not None:
            assert(valid_len.shape[0] == batch_size)
        # Project, then split the model dim into (num_head, d) head slices.
        q = self.linear_q(q).reshape(batch_size, -1, self.num_head, d)
        k = self.linear_k(k).reshape(batch_size, -1, self.num_head, d)
        v = self.linear_v(v).reshape(batch_size, -1, self.num_head, d)
        # [batch_size, #q/#k/#v, num_heads, d] -> [batch_size, num_heads, #q/#k/#v, d]
        q, v, k = q.transpose(1, 2), v.transpose(1, 2), k.transpose(1, 2)
        # Scaled dot-product scores: [batch_size, num_heads, #q, #k/#v]
        x = torch.matmul(q, (k.transpose(2, 3))) * self.scale
        x = self.masked_softmax(x, valid_len)
        x = torch.matmul(x, v)
        # Merge heads back to [batch_size, #q, num_head * d], then project out.
        x = x.transpose(1, 2)
        x = x.reshape(batch_size, -1, self.num_head * self.d)
        x = self.output_layer(x)
        return x
class FeedForwardNetwork(nn.Module):
    """Position-wise feed-forward block: Linear -> ReLU -> Linear."""

    def __init__(self, d_model, hidden_size):
        super().__init__()
        self.layer1 = nn.Linear(d_model, hidden_size)
        self.relu = nn.ReLU(inplace=True)
        self.layer2 = nn.Linear(hidden_size, d_model)
        initialize_weight(self.layer1)
        initialize_weight(self.layer2)

    def forward(self, x):
        """Apply the two-layer MLP to the last dimension of x."""
        return self.layer2(self.relu(self.layer1(x)))
class Add_Norm(nn.Module):
    """Post-norm residual wrapper: LayerNorm(res + Dropout(x))."""

    def __init__(self, d_model, dropout):
        super().__init__()
        self.norm = nn.LayerNorm(d_model, eps=1e-6)
        self.dropout = nn.Dropout(dropout, inplace=True)

    def forward(self, res, x):
        # Drop units of the sublayer output, add the residual, normalize.
        dropped = self.dropout(x)
        return self.norm(dropped.add(res))
class EncoderLayer(nn.Module):
    """One Transformer encoder block: self-attention then feed-forward,
    each followed by an add-and-norm step."""

    def __init__(self, d_model, ffn_hidden_size, dropout, num_head):
        super().__init__()
        self.self_attention = MultiHeadAttention(d_model, num_head)
        self.add_norm1 = Add_Norm(d_model, dropout)
        self.ffn = FeedForwardNetwork(d_model, ffn_hidden_size)
        self.add_norm2 = Add_Norm(d_model, dropout)

    def forward(self, x, enc_valid_len):
        """x: [batch, seq, d_model]; enc_valid_len masks padded positions."""
        attended = self.add_norm1(x, self.self_attention(x, x, x, enc_valid_len))
        return self.add_norm2(attended, self.ffn(attended))
class DecoderLayer(nn.Module):
    """One Transformer decoder block: masked self-attention, encoder-decoder
    attention, and a feed-forward sublayer, each followed by add&norm.

    `i` is this layer's index, used to address its own key/value cache slot
    inside the shared decoder state (enables incremental decoding).
    """
    def __init__(self, d_model, ffn_hidden_size, dropout, num_head, i):
        super().__init__()
        self.i = i
        self.self_attention = MultiHeadAttention(d_model, num_head)
        self.add_norm1 = Add_Norm(d_model, dropout)
        self.enc_dec_attention = MultiHeadAttention(d_model, num_head)
        self.add_norm2 = Add_Norm(d_model, dropout)
        self.ffn = FeedForwardNetwork(d_model, ffn_hidden_size)
        self.add_norm3 = Add_Norm(d_model, dropout)
    def forward(self, x, state):
        """x: current decoder input embeddings [batch, steps, d_model].
        state: [encoder_output, enc_valid_len, per-layer K/V cache list];
        this layer's cache entry is extended with x (mutates `state`).
        Returns (output, state).
        """
        enc_output, enc_valid_len = state[0], state[1]
        # Append the new positions to this layer's cached keys/values.
        if state[2][self.i] is None:
            self_kv = x
        else:
            self_kv = torch.concat([state[2][self.i], x], dim=1)
        state[2][self.i] = self_kv
        if self.training:
            batch_size, num_steps, d = x.shape
            # During training, each query may only attend to positions up to
            # and including itself, so its valid length equals its 1-based
            # position (a causal mask).
            # shape of `dec_valid_len`: [batch_size, num_steps]
            dec_valid_len = torch.arange(1, num_steps+1, device=x.device).repeat(batch_size, 1)
        else:
            # At inference tokens arrive one at a time, so no mask is needed.
            dec_valid_len = None
        y = self.self_attention(x, self_kv, self_kv, dec_valid_len)
        y = self.add_norm1(x, y)
        z = self.enc_dec_attention(y, enc_output, enc_output, enc_valid_len)
        z = self.add_norm2(y, z)
        out = self.ffn(z)
        out = self.add_norm3(z, out)
        return out, state
class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal position encodings (Vaswani et al., 2017):
    even channels get sin(pos / 10000^(i/d_model)), odd channels cos."""

    def __init__(self, d_model, dropout, max_len=1000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(dropout, inplace=True)
        P = torch.zeros((1, max_len, d_model))
        X = torch.arange(max_len, dtype=torch.float32).reshape(
            -1, 1) / torch.pow(10000, torch.arange(
            0, d_model, 2, dtype=torch.float32) / d_model)
        P[:, :, 0::2] = torch.sin(X)
        P[:, :, 1::2] = torch.cos(X)
        # FIX: register the table as a (non-persistent) buffer so that
        # .to(device)/.cuda() move it with the module; the original plain
        # tensor stayed on CPU and was copied to the device every forward.
        # persistent=False keeps it out of state_dict, matching the original.
        self.register_buffer('P', P, persistent=False)

    def forward(self, X):
        """Add position encodings for the first X.shape[1] positions, then
        apply dropout. X: [batch, seq_len, d_model]."""
        # .to(X.device) is a no-op once the buffer lives on the right device.
        X = X + self.P[:, :X.shape[1], :].to(X.device)
        return self.dropout(X)
class Encoder(nn.Module):
    """Transformer encoder: scaled token embedding + positional encoding
    followed by a stack of N encoder layers."""

    def __init__(self, N, d_model, ffn_hidden_size, dropout, num_head, vocab_size):
        super().__init__()
        self.emb_scale = d_model ** 0.5  # scale embeddings by sqrt(d_model)
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.positional_encoding = PositionalEncoding(d_model, dropout)
        self.layers = nn.ModuleList(
            EncoderLayer(d_model, ffn_hidden_size, dropout, num_head)
            for _ in range(N))

    def forward(self, x, enc_valid_len):
        """x: token ids [batch, seq]; returns [batch, seq, d_model]."""
        hidden = self.positional_encoding(self.embedding(x) * self.emb_scale)
        for layer in self.layers:
            hidden = layer(hidden, enc_valid_len)
        return hidden
class Decoder(nn.Module):
    """Transformer decoder with the output projection tied to the input
    embedding matrix (weight tying)."""

    def __init__(self, N, d_model, ffn_hidden_size, dropout, num_head, vocab_size):
        super().__init__()
        self.emb_scale = d_model ** 0.5  # scale embeddings by sqrt(d_model)
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.positional_encoding = PositionalEncoding(d_model, dropout)
        self.layers = nn.ModuleList(
            DecoderLayer(d_model, ffn_hidden_size, dropout, num_head, i)
            for i in range(N))

    def forward(self, x, state):
        """x: target token ids [batch, steps]; returns (logits, state)."""
        hidden = self.positional_encoding(self.embedding(x) * self.emb_scale)
        for layer in self.layers:
            hidden, state = layer(hidden, state)
        # Tied output projection: logits over the vocabulary come from the
        # transpose of the embedding weight [vocab_size, d_model].
        logits = torch.matmul(hidden, self.embedding.weight.T)
        return logits, state

    def init_state(self, enc_output, enc_valid_len):
        """Fresh decoder state: encoder output, its valid lengths, and one
        empty key/value cache slot per layer."""
        return [enc_output, enc_valid_len, [None] * len(self.layers)]
class Transformer(nn.Module):
    """Full encoder-decoder Transformer for sequence-to-sequence tasks."""

    def __init__(self,
                 src_vocab_size,
                 trg_vocab_size,
                 N,
                 d_model,
                 ffn_hidden_size,
                 dropout,
                 num_head):
        super().__init__()
        # Keep hyper-parameters around for introspection.
        self.N = N
        self.d_model = d_model
        self.ffn_hidden_size = ffn_hidden_size
        self.num_head = num_head
        self.dropout = dropout
        self.encoder = Encoder(N, d_model, ffn_hidden_size, dropout, num_head, src_vocab_size)
        self.decoder = Decoder(N, d_model, ffn_hidden_size, dropout, num_head, trg_vocab_size)

    def forward(self, inputs, enc_valid_len, targets):
        """Encode `inputs`, then decode `targets` against the encoder
        memory; returns (decoder_logits, decoder_state)."""
        memory = self.encoder(inputs, enc_valid_len)
        state = self.decoder.init_state(memory, enc_valid_len)
        return self.decoder(targets, state)

    def print_num_params(self):
        """Print the count of trainable parameters."""
        trainable = sum(p.numel() for p in self.parameters() if p.requires_grad)
        print(f'{trainable:,} trainable parameters.')
| zxh0916/WeeklyPaper | Week5-Transformer/module.py | module.py | py | 9,868 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "torch.device",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.xavie... |
8761905914 | from Device import Device
from datetime import datetime
#import RPi.GPIO as GPIO
class Led(Device):
    """GPIO-backed LED device.

    Configuration (gpio_pin, GPIO module, device_object_id, ...) is injected
    via keyword arguments in __init__.
    """
    # Current on/off state (True = on); reset to False in __init__.
    status = None
    # Timestamp of the most recent state change.
    laststatuschange = None
    # GPIO module/instance injected via kwargs (e.g. RPi.GPIO).
    GPIO = None

    def __init__(self, **kwargs):
        # Attach every supplied setting as an instance attribute.
        for key, value in kwargs.items():
            setattr(self, key, value)
        Device.__init__(self)
        self.status = False
        self.GPIO.setup(self.gpio_pin, self.GPIO.OUT)
        self.GPIO.output(self.gpio_pin, self.status)  # reset status

    def _apply_status(self, status):
        """Drive the pin to `status`, record the change time, and log it.

        Shared by set_status and _run_sensor, which previously duplicated
        this logic verbatim.
        """
        self.status = status
        self.GPIO.output(self.gpio_pin, self.status)
        self.laststatuschange = datetime.now()
        print("{0} Status Change at {1}, Status = {2}, Pin= {3}".format(str(self.device_object_id), str(self.laststatuschange), str(self.status), str(self.gpio_pin)))
        return self

    def set_status(self, **kwargs):  # call here without trigger the hooks
        """Set the LED state directly; defaults to switching it on when no
        explicit status keyword is given."""
        return self._apply_status(kwargs.get('status', True))

    def _run_sensor(self, **kwargs):
        """Sensor hook: set the LED to the given status, or toggle it when
        no status keyword is given."""
        return self._apply_status(kwargs.get('status', not self.status))
| flipsee/rpicenter | sandbox/internal_recipe/Led.py | Led.py | py | 1,458 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "Device.Device",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "Device.Device.__init__",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "Device.Device",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.n... |
36999949572 | import numpy as np
import datasets.utils.image_utils as image_utils
# Smoke-test patch-index computation on a large synthetic RGB image.
image_shape = (36546, 63245, 3)
img = np.zeros(image_shape,dtype=np.uint8)
upsampling_factor = 4
# Patch edge = 256 * 2^4 = 4096 pixels; depth covers all 3 channels.
patch_size = (256*2**upsampling_factor,256*2**upsampling_factor,3)
patch = image_utils.compute_patch_indices(image_shape=image_shape, patch_size=patch_size, overlap=0)
# Keep only patches whose start indices are non-negative
# (compute_patch_indices can presumably emit negative offsets — TODO confirm).
patch = [item for item in patch if item[0]>=0 and item[1]>=0 and item[2]>=0]
print(patch)
patch1 = image_utils.get_patch_from_3d_data(img, patch_shape=patch_size, patch_index=patch[0])
b = 2 | vuhoangminh/vqa_medical | tests/test_image_utils.py | test_image_utils.py | py | 529 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "datasets.utils.image_utils.compute_patch_indices",
"line_number": 9,
"usage_type": "call"
},
{
"api_na... |
39674272083 | #!/usr/bin/python
"""
Dirty script which provides some (not well maintained) functions for plotting
data.
"""
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import logging
from matplotlib.patches import Polygon, Circle
def shoot(lon, lat, azimuth, maxdist=None):
    """Shooter Function: solve the direct geodesic problem.

    From start point (lon, lat) in degrees, travel `maxdist` kilometres at
    the initial bearing `azimuth` (degrees) and return the destination
    (glon2, glat2, back_azimuth), all in degrees.

    Original javascript on http://williams.best.vwh.net/gccalc.htm
    Translated to python by Thomas Lecocq

    NOTE(review): the maxdist=None default would raise a TypeError on the
    division below; callers apparently always pass a distance — confirm.
    Returns None (with a printed warning) for non-N-S courses from a pole.
    """
    glat1 = lat * np.pi / 180.
    glon1 = lon * np.pi / 180.
    # Distances are worked in nautical miles internally (1 nm = 1.852 km).
    s = maxdist / 1.852
    faz = azimuth * np.pi / 180.
    EPS= 0.00000000005
    if ((np.abs(np.cos(glat1))<EPS) and not (np.abs(np.sin(faz))<EPS)):
        print("Only N-S courses are meaningful, starting at a pole!")
        return
    # WGS84-like ellipsoid: equatorial radius (nm) and flattening.
    a=6378.13/1.852
    f=1/298.257223563
    r = 1 - f
    tu = r * np.tan(glat1)
    sf = np.sin(faz)
    cf = np.cos(faz)
    if (cf==0):
        b=0.
    else:
        b=2. * np.arctan2 (tu, cf)
    cu = 1. / np.sqrt(1 + tu * tu)
    su = tu * cu
    sa = cu * sf
    c2a = 1 - sa * sa
    x = 1. + np.sqrt(1. + c2a * (1. / (r * r) - 1.))
    x = (x - 2.) / x
    c = 1. - x
    c = (x * x / 4. + 1.) / c
    d = (0.375 * x * x - 1.) * x
    tu = s / (r * a * c)
    y = tu
    # Iterate the distance integral until the angular distance converges.
    c = y + 1
    while (np.abs (y - c) > EPS):
        sy = np.sin(y)
        cy = np.cos(y)
        cz = np.cos(b + y)
        e = 2. * cz * cz - 1.
        c = y
        x = e * cy
        y = e + e - 1.
        y = (((sy * sy * 4. - 3.) * y * cz * d / 6. + x) *
            d / 4. - cz) * sy * d + tu
    # Destination latitude/longitude and back azimuth, normalized to (-pi, pi].
    b = cu * cy * cf - su * sy
    c = r * np.sqrt(sa * sa + b * b)
    d = su * cy + cu * sy * cf
    glat2 = (np.arctan2(d, c) + np.pi) % (2*np.pi) - np.pi
    c = cu * cy - su * sy * cf
    x = np.arctan2(sy * sf, c)
    c = ((-3. * c2a + 4.) * f + 4.) * c2a * f / 16.
    d = ((e * cy * c + cz) * sy * c + y) * sa
    glon2 = ((glon1 + x - (1. - c) * d * f + np.pi) % (2*np.pi)) - np.pi
    baz = (np.arctan2(sa, b) + np.pi) % (2 * np.pi)
    # Convert results back to degrees.
    glon2 *= 180./np.pi
    glat2 *= 180./np.pi
    baz *= 180./np.pi
    return (glon2, glat2, baz)
def equi(m, centerlon, centerlat, radius, *args, **kwargs):
    """Draw an equidistant circle of `radius` km around (centerlon,
    centerlat) on Basemap `m`: a translucent black fill plus an outline
    styled by **kwargs. Returns (line_artist, patch_artist)."""
    glon1 = centerlon
    glat1 = centerlat
    X = []
    Y = []
    # Sample the circle by shooting a geodesic every degree of azimuth.
    for azimuth in range(0, 360):
        glon2, glat2, baz = shoot(glon1, glat1, azimuth, radius)
        X.append(glon2)
        Y.append(glat2)
    # Close the ring.
    X.append(X[0])
    Y.append(Y[0])
    #m.plot(X,Y,**kwargs) #Should work, but doesn't...
    # Project lon/lat to map coordinates before drawing.
    X,Y = m(X,Y)
    poly = Polygon(list(zip(X, Y)), facecolor='black', alpha=0.2)
    pt = plt.gca().add_patch(poly)
    pt2, = plt.plot(X,Y,**kwargs)
    return pt2, pt
def add_poly(coords, m):
    """Add a translucent red polygon to the current axes.

    coords: iterable of (lon, lat) pairs; m: Basemap used for projection.
    """
    xy = []
    for i in coords:
        xy.append(m(i[0], i[1]))
    poly = Polygon(xy, facecolor='red', alpha=0.4)
    plt.gca().add_patch(poly)
def add_point(coords, m):
    """Add a translucent red circle (5000 map units radius) at the
    projected (lon, lat) position on the current axes."""
    xy = m(coords[0], coords[1])
    point = Circle(xy, radius=5000, facecolor='red', alpha=0.4)
    plt.gca().add_patch(point)
def polyplot(polygons, points):
    """Show a Robinson-projection world map with red `polygons` (lists of
    (lon, lat)) and blue scatter `points` given as (lat, lon) pairs."""
    m = Basemap(projection='robin',lon_0=0)
    m.drawcoastlines()
    m.drawmapboundary()
    for poly in polygons:
        add_poly(poly, m)
    xs, ys = [], []
    for point in points:
        # Note the swap: points arrive as (lat, lon) but m() wants (lon, lat).
        x, y = m(point[1], point[0])
        xs.append(x)
        ys.append(y)
    #    add_point(point, m)
    m.scatter(xs, ys, color='b')
    plt.show()
def plotevents(ed):
    """Render 350 frames of event-cluster state over the UK/Ireland region
    and save them as im.mp4.

    NOTE(review): an identical `plotevents` is defined again later in this
    file and shadows this one — this first definition is dead code; confirm
    and delete one copy.
    NOTE(review): the `yield` below makes this a generator, so calling it
    does nothing until the caller iterates; each iteration draws one frame
    from the current state of `ed` (the event detector).
    """
    # Set up formatting for the movie files
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
    #m = Basemap(projection='robin', lon_0=59, lat_0=-12, lon_1=50, lat_1=4.2)
    m = Basemap(projection='merc', lat_0=53.458736, lon_0=-2.2,
                resolution='l', area_thresh = 1000.0,
                urcrnrlat=58.869587, urcrnrlon=4.186178,
                llcrnrlat=48.949979, llcrnrlon=-12.359231) # lat, lon
    m.drawcoastlines()
    m.drawmapboundary()
    torem = []
    ims = []
    for framid in range(350):
        # Suspend until the caller advances the generator (one frame per step).
        yield
        logging.info("Drawing frame %i" % framid)
        #for x in torem:
        #    x.remove()
        torem = []
        clusters, unclustered, radius = ed.get_clusters(), ed.get_unclustered_points(), ed.c_manager.radius
        for c in clusters:
            # Clustered points in blue.
            for p in c.get_points():
                x, y = m(p[0], p[1])
                b, = m.plot(x, y, 'b.')
                torem.append(b)
            # Cluster centres: red equidistant circle plus a green dot.
            for x in c.centres:
                # point = Circle(xy2, radius=2000, facecolor='red', alpha=0.4)
                # plt.gca().add_patch(point)
                b = equi(m, x[0], x[1], radius, color='red', alpha=0.4)
                torem += b
                xy2 = m(x[0], x[1])
                b, = m.plot(xy2[0], xy2[1], 'g.')
                torem.append(b)
        # Unclustered points in red.
        for p in unclustered:
            x, y = m(p[0], p[1])
            b, = m.plot(x, y, 'r.')
            torem.append(b)
        txt = plt.text(-1, 0.2, "%i - %s" % (framid, ed.c_manager.lasttime), fontsize=10)
        torem.append(txt)
        #totimg = torem[0]
        #for x in range(1, len(torem)):
        #    totimg += torem[x]
        ims.append(torem)
    im_ani = animation.ArtistAnimation(plt.gcf(), ims, interval=50, repeat_delay=3000, blit=True)
    im_ani.save('im.mp4', writer=writer, dpi=200)
    plt.show()
def plotevents(ed):
    """Render 350 frames of event-cluster state over the UK/Ireland region
    and save them as im.mp4.

    NOTE(review): this is a byte-for-byte duplicate of the `plotevents`
    defined immediately above; it shadows the earlier definition — confirm
    and delete one copy.
    NOTE(review): the `yield` below makes this a generator, so calling it
    does nothing until the caller iterates; each iteration draws one frame
    from the current state of `ed` (the event detector).
    """
    # Set up formatting for the movie files
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
    #m = Basemap(projection='robin', lon_0=59, lat_0=-12, lon_1=50, lat_1=4.2)
    m = Basemap(projection='merc', lat_0=53.458736, lon_0=-2.2,
                resolution='l', area_thresh = 1000.0,
                urcrnrlat=58.869587, urcrnrlon=4.186178,
                llcrnrlat=48.949979, llcrnrlon=-12.359231) # lat, lon
    m.drawcoastlines()
    m.drawmapboundary()
    torem = []
    ims = []
    for framid in range(350):
        # Suspend until the caller advances the generator (one frame per step).
        yield
        logging.info("Drawing frame %i" % framid)
        #for x in torem:
        #    x.remove()
        torem = []
        clusters, unclustered, radius = ed.get_clusters(), ed.get_unclustered_points(), ed.c_manager.radius
        for c in clusters:
            # Clustered points in blue.
            for p in c.get_points():
                x, y = m(p[0], p[1])
                b, = m.plot(x, y, 'b.')
                torem.append(b)
            # Cluster centres: red equidistant circle plus a green dot.
            for x in c.centres:
                # point = Circle(xy2, radius=2000, facecolor='red', alpha=0.4)
                # plt.gca().add_patch(point)
                b = equi(m, x[0], x[1], radius, color='red', alpha=0.4)
                torem += b
                xy2 = m(x[0], x[1])
                b, = m.plot(xy2[0], xy2[1], 'g.')
                torem.append(b)
        # Unclustered points in red.
        for p in unclustered:
            x, y = m(p[0], p[1])
            b, = m.plot(x, y, 'r.')
            torem.append(b)
        txt = plt.text(-1, 0.2, "%i - %s" % (framid, ed.c_manager.lasttime), fontsize=10)
        torem.append(txt)
        #totimg = torem[0]
        #for x in range(1, len(torem)):
        #    totimg += torem[x]
        ims.append(torem)
    im_ani = animation.ArtistAnimation(plt.gcf(), ims, interval=50, repeat_delay=3000, blit=True)
    im_ani.save('im.mp4', writer=writer, dpi=200)
    plt.show()
def d_polyplot(polyold, polynew):
    """Show a before/after comparison: `polyold` polygons on the top map
    ("Before") and `polynew` on the bottom ("Unioned"), both in a Robinson
    projection."""
    fig = plt.figure()
    ax = fig.add_subplot(211)
    ax.set_title("Before")
    m1 = Basemap(projection='robin', lon_0=0)
    m1.drawcoastlines()
    m1.drawmapboundary()
    for poly in polyold:
        add_poly(poly, m1)
    ax = fig.add_subplot(212)
    ax.set_title("Unioned")
    m2 = Basemap(projection='robin', lon_0=0)
    m2.drawcoastlines()
    m2.drawmapboundary()
    for poly in polynew:
        add_poly(poly, m2)
plt.show() | Humpheh/twied | scripts/examples/polyplotter.py | polyplotter.py | py | 7,684 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "numpy.pi",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "numpy.abs",
"line_numbe... |
72499398113 | from sqlalchemy import insert, select
from sqlalchemy.ext.asyncio import AsyncSession
from app.crud.base import CRUDBase, DatabaseModel
from app.models import Question
class CRUDQuesttion(CRUDBase):
    """CRUD operations for the Question model.

    NOTE(review): the class name has a typo ("Questtion"); left as-is since
    renaming would break importers.
    """
    async def get_previous_object(
        self,
        session: AsyncSession
    ) -> DatabaseModel:
        """Return the second-newest row by id (skip the newest one), or
        None when fewer than two rows exist."""
        db_object = await session.execute(
            select(self.model).order_by(
                self.model.id.desc()
            ).offset(1).limit(1)
        )
        return db_object.scalars().first()
    async def get_object_by_question_id(
        self,
        question_id: int,
        session: AsyncSession
    ) -> DatabaseModel:
        """Return the first row whose question_id matches, or None."""
        db_object = await session.execute(
            select(self.model).where(
                self.model.question_id == question_id
            )
        )
        return db_object.scalars().first()
    async def create_object(
        self,
        objects_in: list[dict],
        session: AsyncSession
    ) -> None:
        """Bulk-insert rows from a list of column dicts and commit."""
        await session.execute(
            insert(self.model),
            objects_in
        )
        await session.commit()
question_service = CRUDQuesttion(Question)
| SergoSolo/quiz_questions | app/crud/question.py | question.py | py | 1,245 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "app.crud.base.CRUDBase",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.ext.asyncio.AsyncSession",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.select",
"line_number": 16,
"usage_type": "call"
},
{
"api_n... |
12578117452 | #!/usr/bin/env python3
import math, os.path
import sys
import argparse
from toolbox import *
from toolbox.files import file_groups
def main():
    """CLI entry point: parse arguments, then either list the known file
    groups (-g), list what would move without moving (-l), or move each
    matching file into a per-group sub-folder of the given path."""
    parser = argparse.ArgumentParser(
        description="Tidy a folder by moving groups of similar files into separate sub-folders",
    )
    parser.add_argument(
        "folder", metavar="path", type=str, help="Folder path to tidy", nargs="?"
    )
    parser.add_argument(
        "-g",
        "--groups",
        action="store_true",
        default=False,
        help="Show file groups information",
    )
    parser.add_argument(
        "-l",
        "--list",
        action="store_true",
        default=False,
        help="List files in groups, don't move them",
    )
    args = parser.parse_args()
    argsd = vars(args)
    if argsd["groups"]:
        # -g: print each group name and its extensions, then exit.
        for k, v in file_groups.items():
            toolboxprint("Group: %s" % (k), cyan_words=[k])
            s = []
            for e in v:
                s.append(e)
            toolboxprint(" %s" % (" ".join(s)))
        exit()
    elif argsd["folder"] is None:
        # No folder given and not -g: nothing to do, show usage.
        parser.print_help()
        exit()
    dont_move = argsd["list"]
    fs = FileOps(simulate=False, verbose=True, overwrite=False)
    for group in file_groups:
        toolboxprint("Processing file group %s ..." % (group), cyan_words=[group])
        res = fs.get_file_list(argsd["folder"], recursive=False, for_group=group)
        new_dest = argsd["folder"] + os.sep + group
        if len(res):
            # Create the destination sub-folder only when actually moving.
            if not dont_move:
                fs.make_directory(new_dest, silent=True)
            print(" Found %d files in %s group" % (len(res), group))
            for f in res:
                if dont_move:
                    print(" %s" % (colour_path_str(str(f))))
                else:
                    fs.move_file(f, new_dest)
if __name__ == "__main__":
main()
| michaelgale/toolbox-py | toolbox/scripts/tidyfolder.py | tidyfolder.py | py | 1,871 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "toolbox.files.file_groups.items",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "toolbox.files.file_groups",
"line_number": 36,
"usage_type": "name"
},
{
... |
4199959125 | from collections import deque
def solution(n, edge):
    """Return how many nodes lie at the maximum BFS distance from node 1.

    n: number of nodes, labelled 1..n; edge: undirected edges as [a, b] pairs.
    """
    # Undirected adjacency list; index 0 is unused.
    adjacency = [[] for _ in range(n + 1)]
    for a, b in edge:
        adjacency[a].append(b)
        adjacency[b].append(a)
    # BFS from node 1; -1 marks "not visited yet".
    dist = [-1] * (n + 1)
    dist[1] = 0
    queue = deque([1])
    while queue:
        cur = queue.popleft()
        for neighbor in adjacency[cur]:
            if dist[neighbor] == -1:
                dist[neighbor] = dist[cur] + 1
                queue.append(neighbor)
    # Count how many nodes share the greatest distance.
    return dist.count(max(dist))
| hyeonwook98/Algorithm | Programmers/가장 먼 노드.py | 가장 먼 노드.py | py | 683 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 16,
"usage_type": "call"
}
] |
74879021154 | import requests
from bs4 import BeautifulSoup
import pandas as pd
import os
# Scrape the alumni listing page for names, thumbnail URLs, and profile links.
base = 'http://mit.spbau.ru'
r = requests.get(base+'/students/se')
html = r.text
soup = BeautifulSoup(html, 'html.parser')
names = []
images = []
hrefs = []
for div in soup.find_all('div', {'class': 'field-content alumni-userpic'}):
    for a in div.find_all('a'):
        names.append(a.text)
    for img in div.find_all('img'):
        images.append(img['src'])
        # NOTE(review): `a` here is the last anchor from the previous loop;
        # with multiple anchors/images per div the pairing could be wrong —
        # confirm each userpic div holds exactly one <a> and one <img>.
        hrefs.append(base + a['href'])
# Filter out entries whose "name" isn't 1-4 whitespace-separated words.
df = pd.DataFrame({'name': [name for name in names if 0 < len(name.split()) < 5], \
                   'image': images, 'url': hrefs})
# CSS classes of the profile-page fields to extract (program, year, thesis,
# advisor, current position, background).
fields = ['field field-name-field-program field-type-list-text field-label-above',
          'field field-name-field-gradyead field-type-list-integer field-label-inline clearfix',
          'field field-name-field-thesistopic field-type-text field-label-above',
          'field field-name-field-advisor field-type-text field-label-above',
          'field field-name-field-wherenow field-type-text field-label-above',
          'field field-name-field-before-au field-type-text field-label-inline clearfix']
image_class = 'field field-name-field-alumni-photo field-type-image field-label-hidden'
# Fetch every profile page; collect its field texts and full-size photo URL.
blocks = []
images = []
for url in df.url:
    try:
        block = []
        r = requests.get(url)
        html = r.text
        soup = BeautifulSoup(html, 'html.parser')
        for field in fields:
            try:
                block.append(soup.find('div', {'class': field}).text)
            except:
                # Field missing on this profile — skip it.
                continue
        blocks.append(block)
        images.append(soup.find('div', {'class': image_class}).find('img')['src'])
    except:
        # Page fetch or photo lookup failed: record empty info and a
        # 'None' sentinel URL, and log the failing profile.
        blocks.append([])
        images.append('None')
        print(url)
def get_info(block):
dicts = [{k: v} for k, v in [line.split(':') for line in block]]
info = dict(pair for d in dicts for pair in d.items())
return info
infos = []
for block in blocks:
infos.append(get_info(block))
df['info'] = infos
df['big_images'] = images
path = 'au_photos'
for idx in range(len(df)):
if df.big_images[idx] != 'None':
img_data = requests.get(df.big_images[idx]).content
else:
img_data = requests.get(df.image[idx]).content
with open(os.path.join(path, f'{df.name[idx].split()[0]}.png'), 'wb') as handler:
handler.write(img_data)
df.to_csv('au_students.csv') | Forsenlol/SE_hi | data/scripts_for_parsing/au_alumni.py | au_alumni.py | py | 2,383 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "requests.get",
"li... |
39329032276 | import os.path
import pathlib
import subprocess
import setuptools
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# get module version from git tag
client_version = subprocess.run(['git', 'describe', '--tags'],
stdout=subprocess.PIPE).stdout.decode("utf-8").strip()
# The text of the README file
README = (HERE / "README.md").read_text()
# assert "." in alphavantage_api_client_version
# assert os.path.isfile("alphavantage_api_client/version.py")
# with open("alphavantage_api_client/VERSION","w", encoding="utf-8") as fh:
# fh.write(f'{alphavantage_api_client_version}\n')
# This call to setup() does all the work
setup(
name="alphavantage_api_client",
version=client_version,
description="Interact with Alpha Vantage REST API",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/xrgarcia/alphavantage-api-client",
author="Slashbin, LLC",
author_email="support@slashbin.us",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Operating System :: OS Independent"
],
packages=setuptools.find_packages(),
py_modules=["alphavantage_api_client"],
include_package_data=True,
install_requires=["requests","pydantic"],
python_requires=">=3.9"
)
| xrgarcia/alphavantage_api_client | setup.py | setup.py | py | 1,427 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "setuptools.setup",
... |
12668151670 | import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
image = cv.imread("example.jpg") #change path for your image
for i in range (0, image.shape[0]):
for j in range (0, image.shape[1]):
pixel = 255 - 1 -image[i][j]
image[i][j] = pixel
cv.imwrite('image.png',image)
cv.imshow("example_result.png",image)
cv.waitKey(0)
cv.destroyAllWindows() #press any key to close the window
| rafaelcbpy/ProcessImage-VisionCompute | filters/Negative_Filter.py | Negative_Filter.py | py | 434 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 15,... |
5432761830 | import docker
import logging
from celery import Celery
from celery.schedules import crontab
app = Celery()
@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
cli = docker.from_env(version="1.28")
for node in cli.nodes.list(filters={"role": "worker"}):
logging.info("Adding rebuild task for node %s", node.id)
key = "rebuild-image-cache-" + node.id
app.conf.beat_schedule[key] = {
"task": "gwvolman.tasks.rebuild_image_cache",
"schedule": crontab(minute=0, hour=0),
"options": {"queue": node.id},
}
| whole-tale/gwvolman | gwvolman/scheduler.py | scheduler.py | py | 603 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "celery.Celery",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "docker.from_env",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "celery.schedules.crontab",
... |
34999124884 | # -*- coding: UTF-8 -*-
import template.leetcode as leetcode_template
import template.question as question_template
import resource.table as table_template
import resource.datasource as ddl
def fetch_all_problems():
table_template.normal(ddl.QUESTION_DROP)
table_template.normal(ddl.QUESTION_CREATE)
for data in leetcode_template.question_all():
question_template.save_problem(data.get('stat').get('question_id'),
data.get('difficulty').get('level'),
data.get('stat').get('question__title'),
data.get('stat').get('question__title_slug'),
data.get('paid_only'),
)
fetch_all_problems()
| KochamCie/LeetCodeNote | core/problems.py | problems.py | py | 714 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "resource.table.normal",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "resource.table",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "resource.datasource.QUESTION_DROP",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_n... |
8489624117 | from __future__ import division, print_function, absolute_import
import os
import numpy as np
import pygame
from highway_env.road.graphics import WorldSurface, RoadGraphics
from highway_env.vehicle.graphics import VehicleGraphics
CONTROL = {
"throttle": 0,
"brake": 0.,
"steering": 0
}
ControlledVehicle_CONTROL = {
'acceleration': 0,
'steering': 0
}
class EnvViewer(object):
"""
A viewer to render a highway driving environment.
"""
screen = None
SCREEN_WIDTH = 1800
SCREEN_HEIGHT = 500
pygame.init()
VehicleGraphics.FONT = pygame.font.SysFont('Arial', 16)
def __init__(self, env):
self.env = env
pygame.display.set_caption("Highway-env")
panel_size = (self.SCREEN_WIDTH, self.SCREEN_HEIGHT)
self.screen = pygame.display.set_mode((self.SCREEN_WIDTH, self.SCREEN_HEIGHT),0,32)
# self.screen.fill ((0,0,0))
self.sim_surface = WorldSurface(panel_size, 0, pygame.Surface(panel_size))
self.clock = pygame.time.Clock()
self.enabled = True
if "SDL_VIDEODRIVER" in os.environ and os.environ["SDL_VIDEODRIVER"] == "dummy":
self.enabled = False
self.agent_display = None
self.agent_surface = None
def set_agent_display(self, agent_display):
if self.agent_display is None:
self.screen = pygame.display.set_mode((self.SCREEN_WIDTH, 2 * self.SCREEN_HEIGHT))
self.agent_surface = pygame.Surface((self.SCREEN_WIDTH, self.SCREEN_HEIGHT))
self.agent_display = agent_display
def handle_control(self):
pygame.event.pump()
pressed = pygame.key.get_pressed()
if pressed[pygame.K_UP]:
ControlledVehicle_CONTROL['steering'] = ControlledVehicle_CONTROL['steering'] - 0.001 if ControlledVehicle_CONTROL['steering'] > -0.2 else -0.2
if pressed[pygame.K_DOWN]:
ControlledVehicle_CONTROL['steering'] = ControlledVehicle_CONTROL['steering'] + 0.001 if ControlledVehicle_CONTROL['steering'] < 0.2 else 0.2
if pressed[pygame.K_RIGHT]:
ControlledVehicle_CONTROL['acceleration'] = ControlledVehicle_CONTROL['acceleration'] + 0.001 if ControlledVehicle_CONTROL['acceleration'] < 1 else 1
if pressed[pygame.K_LEFT]:
ControlledVehicle_CONTROL['acceleration'] = ControlledVehicle_CONTROL['acceleration'] - 0.001 if ControlledVehicle_CONTROL['acceleration'] > -1 else -1
# if not pressed[pygame.K_UP] and not pressed[pygame.K_DOWN]:
# if ControlledVehicle_CONTROL['steering'] > 0:
# ControlledVehicle_CONTROL['steering'] -= 0.1
# elif ControlledVehicle_CONTROL['steering'] < 0:
# ControlledVehicle_CONTROL['steering'] += 0.1
# if -0.1 < ControlledVehicle_CONTROL['steering'] < 0.1:
# ControlledVehicle_CONTROL['steering'] = 0
# if not pressed[pygame.K_LEFT]:
# ControlledVehicle_CONTROL['brake'] = ControlledVehicle_CONTROL['brake'] - 0.1 if ControlledVehicle_CONTROL['brake'] > 0 else 0
# if not pressed[pygame.K_RIGHT]:
# ControlledVehicle_CONTROL['throttle'] = ControlledVehicle_CONTROL['throttle'] - 0.1 if ControlledVehicle_CONTROL['throttle'] > 0 else 0
#print(ControlledVehicle_CONTROL)
self.env.vehicle.act(ControlledVehicle_CONTROL)
def handle_carsim(self):
pygame.event.pump()
pressed = pygame.key.get_pressed()
if pressed[pygame.K_UP]:
CONTROL['steering'] = CONTROL['steering'] + 0.05 if CONTROL['steering'] < 0.3 else 0.3
if pressed[pygame.K_DOWN]:
CONTROL['steering'] = CONTROL['steering'] - 0.05 if CONTROL['steering'] > -0.3 else -0.3
if pressed[pygame.K_LEFT]:
CONTROL['brake'] = CONTROL['brake'] + 0.05 if CONTROL['brake'] < 1 else 1
if pressed[pygame.K_RIGHT]:
CONTROL['throttle'] = CONTROL['throttle'] + 0.05 if CONTROL['throttle'] < 1 else 1
if not pressed[pygame.K_UP] and not pressed[pygame.K_DOWN]:
if CONTROL['steering'] > 0:
CONTROL['steering'] -= 0.1
elif CONTROL['steering'] < 0:
CONTROL['steering'] += 0.1
if -0.1 < CONTROL['steering'] < 0.1:
CONTROL['steering'] = 0
if not pressed[pygame.K_LEFT]:
CONTROL['brake'] = CONTROL['brake'] - 0.1 if CONTROL['brake'] > 0 else 0
if not pressed[pygame.K_RIGHT]:
CONTROL['throttle'] = CONTROL['throttle'] - 0.1 if CONTROL['throttle'] > 0 else 0
self.env.vehicle.act(CONTROL)
# action = m.send_request_other()
# if 'error' not in action:
# print(action)
#print(CONTROL['brake'])
def handle_events(self):
"""
Handle pygame events by forwarding them to the display and environment vehicle.
"""
#self.handle_carsim()
#self.handle_control()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.env.close()
if self.env.vehicle:
CONTROL = VehicleGraphics.handle_event(self.env.vehicle, event)
self.sim_surface.handle_event(event)
# car_sim_data = m.send_control(VehicleGraphics.CONTROL)
# #t = m.send_control(VehicleGraphics.CONTROL)
# position = car_sim_data['location']
# rotation = car_sim_data['rotation']
# self.env.vehicle.position[0] = position[0]
# self.env.vehicle.position[1] = position[1]
#self.env.vehicle.heading = -rotation[2] / 57.3
def display(self):
"""
Display the road and vehicles on a pygame window.
"""
if not self.enabled:
return
self.sim_surface.move_display_window_to(self.window_position())
RoadGraphics.display(self.env.road, self.sim_surface)
# RoadGraphics.display_bigmap(self.env, self.env.road, self.sim_surface)
RoadGraphics.display_traffic(self.env.road, self.sim_surface)
# RoadGraphics.display_traffic_bigmap(self.env,self.env.road, self.sim_surface)
if self.agent_display:
self.agent_display(self.agent_surface, self.sim_surface)
self.screen.blit(self.agent_surface, (0, self.SCREEN_HEIGHT))
self.screen.blit(self.sim_surface, (0, 0))
# pygame.display.update()
# self.clock.tick(20)
pygame.display.flip()
def get_image(self):
"""
:return: the rendered image as a rbg array
"""
data = pygame.surfarray.array3d(self.screen)
return np.moveaxis(data, 0, 1)
def window_position(self):
"""
:return: the world position of the center of the displayed window.
"""
if self.env.vehicle:
if False:
return self.env.vehicle.position
else:
# return np.array([self.env.vehicle.position[0], self.env.road.network.LANES_NUMBER / 2 * 4 - 2])
return np.array([self.env.vehicle.position[0], self.env.vehicle.position[1]])
else:
return np.array([0, self.env.road.network.LANES_NUMBER / 2 * 4])
def close(self):
"""
Close the pygame window.
"""
pygame.quit()
| jasonplato/Highway_SimulationPlatform | highway_env/envs/graphics.py | graphics.py | py | 7,307 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "highway_env.vehicle.graphics.VehicleGraphics.FONT",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "highway_env.vehicle.graphics.VehicleGraphics",
"line_number": 28,
"usa... |
35096521634 | import os, sys, time
import requests
DEST_DIR = '.\\imgs'
def show(data):
print(data, end=' ')
sys.stdout.flush()
def save_file(img, filename):
path = os.path.join(DEST_DIR, filename)
with open(path, 'wb') as fp:
fp.write(img)
def download(url):
# print(f'downloading {url}')
resp = requests.get(url)
if resp.status_code == 404:
raise requests.exceptions.HTTPError('404 not found!')
# print(f'Ok!')
time.sleep(0.2)
return resp.content
def download_many(count=5):
url_base = 'https://www.tupianzj.com/meinv/20200914/{:0>6d}.html'
for i in range(217448, count + 217448):
data = download(url_base.format(i))
save_file(data, f'{i}.html')
time.sleep(0.2)
def main(download_many, count):
t0 = time.time()
download_many(count)
print('elapsed: {:.2f}s'.format(time.time() - t0))
if __name__ == '__main__':
main(download_many, 2) | Kiruen/kiruen_funbox | python_playground/fluent_python/future_demo/downloader_base.py | downloader_base.py | py | 935 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sys.stdout.flush",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numbe... |
14011134082 | import requests
import os
import json
from dotenv import load_dotenv
load_dotenv()
# To set your enviornment variables in your terminal run the following line:
#export 'BEARER_TOKEN'='<your_bearer_token>'
print(os.environ)
def auth():
return os.environ.get('BEARER_TOKEN')
def create_url():
return "https://api.twitter.com/2/tweets/sample/stream?"
query_params = { 'query': '#Alshabaab (Alshabaab OR shabaab OR Jihad OR Mujahidin OR mandera OR wajir -is:retweet) OR #Garrissa ',
'tweet.fields': 'author_id', }
def create_headers(bearer_token):
headers = {"Authorization": "Bearer {}".format(bearer_token)}
return headers
def connect_to_endpoint(url, headers):
response = requests.request("GET", url, headers=headers, stream=True)
print(response.status_code)
for response_line in response.iter_lines():
if response_line:
json_response = json.loads(response_line)
print(json.dumps(json_response, indent=4, sort_keys=True))
if response.status_code != 200:
raise Exception(
"Request returned an error: {} {}".format(
response.status_code, response.text
)
)
def main():
bearer_token = auth()
url = create_url()
headers = create_headers(bearer_token)
timeout = 0
while True:
connect_to_endpoint(url, headers)
timeout += 1
if __name__ == "__main__":
main() | samkibe/Text-Mining-ON-Twitter----Sample-codes | Stream.py | Stream.py | py | 1,483 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.environ",
"li... |
24218220896 | import requests
from bs4 import BeautifulSoup
from tabulate import tabulate
header = {
"user-agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
}
def get_ipl_table():
page = requests.get(
f"https://www.sportskeeda.com/go/ipl/points-table", headers=header)
soup = BeautifulSoup(page.content, 'lxml')
table = soup.find('table')
rows = table.find_all('tr')
teams = []
for row in rows:
info = row.find_all('td')
team = []
for i in info:
team.append(i.text.strip())
if len(team) == 0:
continue
teams.append(team)
return tabulate(teams, headers=["Team", "P", "W", "L", "D", "NRR", "Pts"], tablefmt="pretty")
def get_yester_match_result():
page = requests.get(
"https://www.sportskeeda.com/cricket/yesterday-ipl-match-result", headers=header)
soup = BeautifulSoup(page.content, 'lxml')
div_tag = soup.find('div', class_="taxonomy-content")
para_tags = div_tag.find_all("p")
info = []
for para in para_tags[:-10]:
if para.text != "":
info.append(para.text)
info.pop(5)
info.pop(4)
summary = info[0]
scoreBoard = f"{info[1]}\n{info[2]}\n{info[3]}\nMan of the Match: {info[-2]}\n"
maxStatsBatsmen = f"Most Runs : {info[4]}\nMost Sixes : {info[-1]}\n"
maxStatsBowler = f"Most Wickets : {info[5]}"
return (summary, scoreBoard, maxStatsBatsmen, maxStatsBowler)
def get_fixture_ipl():
page = requests.get(
"https://www.sportskeeda.com/go/ipl/schedule", headers=header)
soup = BeautifulSoup(page.content, 'lxml')
matchCards = soup.find_all("div", class_="keeda_cricket_event_card")
matchCards = matchCards[1:6]
fixtures = []
for match in matchCards:
fixture = []
fixture.append((match.find(
"div", class_="keeda_cricket_event_date").text).replace("\n\n", "\n"))
fixture.append(
(match.find("div", class_="keeda_cricket_venue").text).split()[-1]
)
fixture.extend([(x.text) for x in match.find_all(
"span", class_="keeda_cricket_team_name")])
print(fixture)
fixtures.append(fixture)
return tabulate(fixtures, headers=["Date / Time", "Venue", "Home Team", "Away Team"], tablefmt="pretty")
if __name__ == "__main__":
print(get_fixture_ipl())
| Harsh1347/FootBot-DiscordBot | ipl.py | ipl.py | py | 2,388 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tabulate.tabulate",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "requests.get",
"... |
41978714472 | import struct
from MemoryManager import *
from Crypto.PublicKey import RSA
from Crypto.Random import get_random_bytes
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.Util.Padding import unpad
TOTAL_LEN_WITHOUT_PAYLOAD = 23
FILE_NAME_LEN = 255
FILE_PREFIX_LEN = FILE_NAME_LEN + 4 + ID_SIZE
IV_LEN = 16
BLOCK_SIZE = 16
AES_KEY_LEN = 16
import traceback
class Request:
clientID : str
version : int
code : int
payloadSize : int
payload : bytes
def __init__(self, req : bytes) -> None:
try:
print(req)
unpacked = struct.unpack("<%dsBHI" % (ID_SIZE), req)
self.clientID = unpacked[0]
self.version = unpacked[1]
self.code = unpacked[2]
self.payloadSize = unpacked[3]
self.payload = ""
#print("len: " + str(len(req[TOTAL_LEN_WITHOUT_PAYLOAD:])))
#self.payload = struct.unpack("<%ds" % (self.payloadSize), req[TOTAL_LEN_WITHOUT_PAYLOAD:])[0]
#self.payload = unpacked[4]
#print("Debug:")
#print("ClientID: " + self.clientID)
#print("Version: " + str(self.version))
#print("Code: " + str(self.code))
#print("PayloadSize: " + str(self.payloadSize))
#print("Payload: " + str(self.payload))
#if self.payloadSize != len(self.payload):
# raise Exception("Unreliable payload size")
except Exception as e:
traceback.print_exc()
raise Exception("Bad request")
def setPayload(self, req : bytes):
self.payload = struct.unpack("<%ds" % (self.payloadSize), req)[0]
if self.payloadSize != len(self.payload):
raise Exception("Unreliable payload size")
REGISTRATION = 1100
PUBLIC_KEY = 1101
SEND_FILE = 1103
CRC_OK = 1104
CRC_NOT_OK = 1105
CRC_ERROR = 1106
NAME_LEN = 255
PUBLIC_KEY_LEN = 160
class RequestProcessor:
req : Request
memMngr : MemoryManager
def __init__(self, req, memMngr : MemoryManager) -> None:
self.req = Request(req);
self.memMngr = memMngr
def getCode(self):
return self.req.code
def regUser(self):
if self.req.payloadSize != NAME_LEN:
raise Exception("Illegal register size")
return self.memMngr.regUser(self.req.payload.decode('utf-8'))
def signKey(self):
if self.req.payloadSize != NAME_LEN + PUBLIC_KEY_LEN:
raise Exception("Illegal register size")
name = self.req.payload[:NAME_LEN]
pkey = self.req.payload[NAME_LEN:]
self.memMngr.signPublicKey(self.req.clientID, name, pkey)
return pkey
def genAESKey(self, pkey : str):
#print("Hex: " + pkey.hex())
recipient_key = RSA.importKey(pkey)
session_key = get_random_bytes(AES_KEY_LEN)
self.memMngr.signAESKey(self.req.clientID, session_key)
cipher_rsa = PKCS1_OAEP.new(recipient_key)
enc_session_key = cipher_rsa.encrypt(session_key)
#enc_session_key = cipher_rsa.encrypt(session_key)
#print("enc_session_key in hex: " + str(enc_session_key.hex()))
return enc_session_key
def saveFileRequest(self):
if self.req.payloadSize <= FILE_PREFIX_LEN:
raise Exception("Illegal file packet size")
fileInfo = struct.unpack("<%dsI%ds" % (ID_SIZE, FILE_NAME_LEN), self.req.payload[:FILE_PREFIX_LEN])
#print(self.req.payload[FILE_PREFIX_LEN:])
#print(str(len(self.req.payload[FILE_PREFIX_LEN:])) + '\t' + str(fileInfo[1]))
fileEncContent = struct.unpack("<%ds" % (fileInfo[1]) , self.req.payload[FILE_PREFIX_LEN:])[0]
key = self.memMngr.clients[self.req.clientID.hex()].AESKey
cipher = AES.new(key, AES.MODE_CBC, b'\x00' * IV_LEN)
plaintext = unpad(cipher.decrypt(fileEncContent), BLOCK_SIZE)
#plaintext = fileEncContent # debug
#print(plaintext)
#return self.memMngr.saveFile(self.req.clientID, fileInfo[2] , plaintext)
return (self.req.clientID, fileInfo[1], fileInfo[2], self.memMngr.saveFile(self.req.clientID, fileInfo[2] , plaintext))
def crcRequestPayloadProcessor(payload, payloadSize : int):
if payloadSize != ID_SIZE + FILE_NAME_LEN:
raise Exception("Illegal crc payload size")
return struct.unpack("%ds%ds" % (ID_SIZE, FILE_NAME_LEN), payload)
return struct.unpack("%ds%ds" % (ID_SIZE, FILE_NAME_LEN), payload)
def approveFile(self):
ClientID, fileName = RequestProcessor.crcRequestPayloadProcessor(self.req.payload, self.req.payloadSize)
self.memMngr.approveFile(self.req.clientID, fileName)
def wrongFileCrc(self):
ClientID, fileName = RequestProcessor.crcRequestPayloadProcessor(self.req.payload, self.req.payloadSize)
self.memMngr.removeFile(self.req.clientID, fileName)
def procReq(self):
code = self.req.code
if code == REGISTRATION:
return self.regUser()
elif code == PUBLIC_KEY:
return self.genAESKey(self.signKey())
elif code == SEND_FILE:
return self.saveFileRequest()
elif code == CRC_OK:
self.approveFile()
elif code == CRC_NOT_OK:
print("CRC not okay")
elif code == CRC_ERROR:
self.wrongFileCrc()
else:
raise Exception("Invalid code") | Naveh1/SecuredFileTransfer | Server/RequestProcessor.py | RequestProcessor.py | py | 5,402 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "struct.unpack",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "Crypto.PublicKey.RSA.... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.