seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
38327117535 | from django.shortcuts import render, redirect
from . import forms, models
from django.views.decorators.http import require_POST
import re
from django.conf import settings
import random
from django.db.models import Max
from django.core.mail import send_mail
import smtplib
from django.urls import reverse
# Module-level navigation state: after a successful login the user is redirected
# to the URL name in ``where_to_go`` (with ``args_to_go`` as URL arguments).
# NOTE(review): these globals are shared by every request of the process, so
# concurrent users can overwrite each other's post-login redirect target.
where_to_go = "accueil_page"
args_to_go = None
def check_length(maxi, *args):
    """Return True when every argument's string form is at most *maxi* chars.

    The original also tested ``len(str(arg)) < 0``, which can never be true
    (lengths are non-negative), so that dead condition was removed; behavior
    is unchanged — empty strings are still accepted.
    """
    return all(len(str(arg)) <= maxi for arg in args)
def accueil(request):
    # Home page: pick up to 5 "recommended" products, cycling through the five
    # categories (t = 1..5) and drawing one random product per category.
    all_p = models.Produit.objects.all()
    if all_p.count() > 5:
        prods = []
        t = 1  # category cursor, advanced only after a successful pick
        choice = None
        while len(prods) < 5:
            if t == 1:
                rand_from = all_p.filter(categorie__exact="Téléphone")
                if len(rand_from) != 0:
                    choice = random.choice(rand_from)
            elif t == 2:
                rand_from = all_p.filter(categorie__exact="Tablette")
                if len(rand_from) != 0:
                    choice = random.choice(rand_from)
            elif t == 3:
                rand_from = all_p.filter(categorie__exact="Ordinateur")
                if len(rand_from) != 0:
                    choice = random.choice(rand_from)
            elif t == 4:
                rand_from = all_p.filter(categorie__exact="Appareil Photo")
                if len(rand_from) != 0:
                    choice = random.choice(rand_from)
            else:
                rand_from = all_p.filter(categorie__exact="Autre")
                if len(rand_from) != 0:
                    choice = random.choice(rand_from)
            # NOTE(review): if ``choice`` stays None (e.g. the first category
            # queried is empty), ``t`` is never advanced and this loop can spin
            # forever — confirm all five categories always have products.
            if choice != None:
                if not choice in prods:
                    prods.append(choice)
                    t += 1
                elif t == 5:
                    # duplicate pick at the last category: restart the rotation
                    t = 1
                else:
                    t += 1
    else:
        # 5 products or fewer in total: just recommend everything
        prods = all_p
    search = forms.Search_Bar()
    dico = {
        "recommended_prods": prods,
        "search_bar": search
    }
    return render(request, 'accueil.html', dico)
def enregistrer(request):
    """Registration page: logged-in users are redirected, others get the form."""
    if "userID" in request.session:
        try:
            current = models.Utilisateur.objects.get(id__exact=request.session["userID"])
            if current.verified:
                return redirect("accueil_page")
            return redirect("verifier_page")
        except:
            return redirect("deconnexion_processus")
    form = forms.Register_Form()
    form.fields["pays"].initial = "Morocco"
    return render(request, 'register.html', {"inputs": form})
def generer_code(max_t):
    """Generate a random string of exactly *max_t* characters.

    ``max_t == 8`` is the temporary-password mode (each loop pass prepends a
    random ASCII letter before the generic character); any other length
    (e.g. 24) produces a verification code from digits/symbols and printable
    ASCII (codes 33..125).

    NOTE(review): ``random`` is not cryptographically secure; for passwords
    the ``secrets`` module should be preferred.
    """
    symbols = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '@', '.', '+', '-', '_']
    code = ""
    while len(code) < max_t:
        if max_t == 8:
            # password mode: start the pair with a random letter
            if random.randint(0, 1) == 1:
                code += chr(random.randint(97, 122))   # a-z
            else:
                code += chr(random.randint(65, 90))    # A-Z
        # generic character: a digit/symbol from the list, or any printable char
        if random.randint(0, 1) == 1:
            code += random.choice(symbols)
        else:
            code += chr(random.randint(33, 125))
    # trim guards against overshooting when two chars are appended per pass
    return code[:max_t]
def send_email(email, content):
    """Send *content* to *email* via Gmail SMTP; return True on success.

    Fixes: the original only called ``server.quit()`` on the success path, so a
    failed send leaked the connection; the bare ``except`` is narrowed to SMTP
    and socket errors.

    SECURITY NOTE(review): SMTP credentials are hard-coded in source; move them
    to settings / environment variables.
    """
    server = None
    try:
        server = smtplib.SMTP("smtp.gmail.com", 587)
        server.starttls()
        server.login("electro.sb1@gmail.com", "eSelleBuy1")
        server.sendmail("oth.lahrimi1996@gmail.com", email, content)
    except (smtplib.SMTPException, OSError):
        return False
    finally:
        if server is not None:
            try:
                server.quit()
            except (smtplib.SMTPException, OSError):
                pass  # connection already gone; nothing left to clean up
    return True
@require_POST
def enregistrement(request):
    # Registration processing.  Validation is a pyramid of nested ifs; every
    # failing check executes ``assert 1 == 2`` purely to jump into the bare
    # ``except`` below, which renders a generic error page.
    # NOTE(review): asserts are stripped under ``python -O``, so under -O the
    # failed checks silently fall through instead of erroring.
    # SECURITY NOTE(review): passwords are stored and compared in plaintext
    # (``password=password1`` below and the ``password__exact`` lookup).
    new_user = forms.Register_Form(request.POST)
    try:
        if new_user.is_valid():
            username = new_user.cleaned_data["username"]
            email = new_user.cleaned_data["email"].lower()
            password1 = new_user.cleaned_data["password1"]
            password2 = new_user.cleaned_data["password2"]
            sexe = new_user.cleaned_data["sexe"]
            nom = new_user.cleaned_data["nom"]
            prenom = new_user.cleaned_data["prenom"]
            rue_immeuble = new_user.cleaned_data["rue_immeuble"]
            numero = new_user.cleaned_data["numero"]
            ville = new_user.cleaned_data["ville"]
            code_postal = new_user.cleaned_data["code_postal"]
            pays = new_user.cleaned_data["pays"]
            tele = new_user.cleaned_data["tele"]
            username_exp = r"[0-9a-zA-Z@\.+\-_]+"
            # checks, outermost to innermost: unique username / allowed chars /
            # unique email / passwords match / password length / password not
            # already used by someone else / field lengths / email actually sent
            if not models.Utilisateur.objects.filter(username__exact=username).exists() and 0<len(username)<=150:
                if re.match(username_exp, username):
                    if not models.Utilisateur.objects.filter(email__iexact=email).exists() and 0<len(email)<=120:
                        if password1 == password2:
                            if 8<=len(password1)<=120:
                                if not models.Utilisateur.objects.filter(password__exact=password1).exists():
                                    if check_length(120, nom, prenom, rue_immeuble, ville) and check_length(10, tele) and tele.isdigit() and numero>0 and code_postal>0:
                                        code = generer_code(24)
                                        if send_email(email, code):
                                            user = models.Utilisateur.objects.create(
                                                username=username,
                                                email=email,
                                                password=password1,
                                                sexe=sexe,
                                                nom=nom,
                                                prenom=prenom,
                                                rue_immeuble=rue_immeuble,
                                                numero=numero,
                                                ville=ville,
                                                code_postal=code_postal,
                                                pays=pays,
                                                tele=tele,
                                                verification_code=code
                                            )
                                            request.session["userID"] = user.id
                                            return redirect("verifier_page")
                                        else:
                                            assert 1 == 2
                                    else:
                                        assert 1 == 2
                                else:
                                    assert 1 == 2
                            else:
                                assert 1 == 2
                        else:
                            assert 1 == 2
                    else:
                        assert 1 == 2
                else:
                    assert 1 == 2
            else:
                assert 1 == 2
        else:
            assert 1 == 2
    except:
        # catch-all error page listing every possible reason for the failure
        infos = """ => ERROR <=
        => "nom d'utilisateur" doit être unique.
        => "nom d'utilisateur" doit contenir 150 caractères au maximum.
        => "nom d'utilisateur" doit contenir des lettres, des chiffres, @, ., -, +, _.
        => "email" doit être unique.
        => "email" doit contenir 120 caractères au maximum.
        => "mot de passe" et "répéter mot de passe" doivent être uniques.
        => "mot de passe" et "répéter mot de passe" doivent être les mêmes.
        => "mot de passe" et "répéter mot de passe" doivent contenir entre 8 et 120 caractères.
        => "numéro" et "code" doivent être supérieur à 0.
        => Vous devez s'enregistrer avec une connexion active.
        """
        dico = {"info_key": infos}
        return render(request, "error.html", dico)
def verifier(request):
    """Show the email-verification form to a logged-in, unverified user."""
    if "userID" not in request.session:
        return redirect("accueil_page")
    if models.Utilisateur.objects.get(id__exact=request.session["userID"]).verified:
        return redirect("accueil_page")
    context = {"inputs": forms.Verify_Form()}
    return render(request, "verify_page.html", context)
@require_POST
def verifier_process(request):
    """Check the submitted verification code and mark the account verified."""
    if "userID" not in request.session:
        return redirect("accueil_page")
    user = models.Utilisateur.objects.get(id__exact=request.session["userID"])
    if user.verified:
        return redirect("accueil_page")
    submitted = forms.Verify_Form(request.POST)
    if submitted.is_valid() and user.verification_code == submitted.cleaned_data["code_ch"]:
        user.verified = True
        user.save()
        return redirect("connexion_page")
    return redirect("verifier_page")
def reenvoyer_code(request):
    # Regenerate and re-send the 24-char verification code to the logged-in
    # user's email address.
    if "userID" in request.session:
        try:
            user = models.Utilisateur.objects.get(id__exact=request.session["userID"])
            if not user.verified:
                code = generer_code(24)
                user.verification_code = code
                user.save()
                if send_email(user.email, user.verification_code):
                    return redirect("verifier_page")
                # deliberate failure: jump to the except branch when the email
                # could not be sent (NOTE: stripped under ``python -O``)
                assert 1 == 2
            return redirect("accueil_page")
        except:
            return redirect("deconnexion_processus")
    return redirect("connexion_page")
def connexion(request):
    """Login page; already-authenticated users go back to the home page."""
    try:
        already_logged = "userID" in request.session
    except:
        return redirect("deconnexion_processus")
    if already_logged:
        return redirect("accueil_page")
    return render(request, 'login.html', {"inputs": forms.Login_Form()})
@require_POST
def connexion_p(request):
    # Login processing: look the account up by email then by username, compare
    # the password and redirect to the module-level ``where_to_go`` target.
    # SECURITY NOTE(review): the password comparison is plaintext equality.
    # Failed checks use ``assert 1 == 2`` to jump to the bare except below
    # (stripped under ``python -O``).
    user = forms.Login_Form(request.POST)
    try:
        if user.is_valid():
            email = user.cleaned_data["email_username"].lower()
            username = user.cleaned_data["email_username"]
            recognize = None
            if models.Utilisateur.objects.filter(email__exact=email).exists():
                recognize = models.Utilisateur.objects.get(email__exact=email)
            elif models.Utilisateur.objects.filter(username__exact=username).exists():
                recognize = models.Utilisateur.objects.get(username__exact=username)
            else:
                assert 1 == 2
            if recognize.password == user.cleaned_data["password"]:
                request.session["userID"] = recognize.id
                # debug trace of the post-login redirect target
                print("\n\n{0}\n\n".format(where_to_go))
                return redirect(reverse(where_to_go, args=args_to_go))
            else:
                assert 1 == 2
        else:
            assert 1 == 2
    except:
        infos = """ => ERROR <=
        => "email/nom d'utilisateur" ou "mot de passe" ne sont pas correcte.
        => Inéxistant "email/nom d'utilisateur".
        => Hors Connexion.
        """
        dico = {"info_key": infos}
        return render(request, "error.html", dico)
def mdp_email(request):
    """Password-reset step 1: ask for the account email (anonymous users only)."""
    if "userID" not in request.session:
        form = forms.Reset_Password_Email()
        return render(request, "reset_pass_email.html", {"inputs": form})
    return redirect("accueil_page")
@require_POST
def mdp_pass(request):
    # Password-reset step 2: generate a temporary 8-char password, email it,
    # save it, then ask the user to type it back (step 3).
    if "userID" in request.session:
        return redirect("accueil_page")
    email_f = forms.Reset_Password_Email(request.POST)
    if email_f.is_valid():
        try:
            user = models.Utilisateur.objects.get(email__iexact=email_f.cleaned_data["email"])
            passw = generer_code(8)
            user.password = passw
            # the new password is persisted only if the email actually went out
            if send_email(user.email, passw):
                user.save()
                mdp_f = forms.Reset_Password_Password()
                dico = {
                    "inputs": mdp_f,
                    "user": user.id
                }
                return render(request, "reset_pass_pass.html", dico)
        except:
            # NOTE(review): on error this redirects to "vendre_page", which
            # looks like a copy/paste target — confirm the intended destination
            return redirect("vendre_page")
    # unknown email / invalid form / send failure: back to step 1
    return redirect("mdp_email_page")
@require_POST
def mdp_verifier(request, user_id):
    # Password-reset step 3: the user re-types the temporary password that was
    # emailed; on match they are logged straight in.
    if "userID" in request.session:
        return redirect("accueil_page")
    pass_f = forms.Reset_Password_Password(request.POST)
    if pass_f.is_valid():
        try:
            user = models.Utilisateur.objects.get(id__exact=user_id)
            if user.password == pass_f.cleaned_data["password"]:
                request.session["userID"] = user.id
                return redirect("accueil_page")
            # a mismatch falls through to the mdp_pass_page redirect below
        except:
            return redirect("connexion_page")
    return redirect("mdp_pass_page")
def vendre(request):
    # "My listings" page for a verified, logged-in seller.
    if "userID" in request.session:
        try:
            if models.Utilisateur.objects.get(id__exact=request.session["userID"]).verified:
                infos = models.Produit.objects.filter(user_id__exact=request.session["userID"])
                search = forms.Search_Bar()
                dico = {
                    "infos": infos,
                    "search_bar": search
                }
                return render(request, 'vendre.html', dico)
            return redirect("verifier_page")
        except:
            return redirect("deconnexion_processus")
    # anonymous: remember this page as the post-login target
    # NOTE(review): module-level global shared by all sessions (race-prone)
    global where_to_go
    where_to_go = "vendre_page"
    print("\n\n{0}\n\n".format(where_to_go))  # debug trace
    return redirect('connexion_page')
def deconnexion(request):
    """Log the user out, reset the post-login redirect target, go home."""
    global where_to_go, args_to_go
    where_to_go = "accueil_page"
    args_to_go = None
    # drop the session key if present; no-op otherwise
    request.session.pop("userID", None)
    return redirect("accueil_page")
def nouvelle_annonce(request):
    # "New listing" form, only for verified, logged-in users.
    if "userID" in request.session:
        try:
            if models.Utilisateur.objects.get(id__exact=request.session["userID"]).verified:
                n_a_inputs = forms.Nouvelle_Annonce_Form()
                search = forms.Search_Bar()
                dico = {
                    "inputs": n_a_inputs,
                    "search_bar": search
                }
                return render(request, "nouvelle_annonce.html", dico)
            return redirect("verifier_page")
        except:
            return redirect("deconnexion_processus")
    # anonymous: remember this page as the post-login target (shared global)
    global where_to_go
    where_to_go = "nouvelle_annonce_page"
    return redirect("connexion_page")
@require_POST
def nouvelle_annonce_processus(request):
    # Create a new product listing from the submitted form and uploaded photos.
    # The surrounding try/except was commented out (see the string literal at
    # the bottom, kept as-is), so validation failures now just fall through to
    # the final redirect.
    #try:
    if "userID" in request.session:
        new_annonce = forms.Nouvelle_Annonce_Form(request.POST, request.FILES)
        if new_annonce.is_valid():
            if check_length(120, new_annonce.cleaned_data["titre"]) and check_length(14, new_annonce.cleaned_data["prix"]) and check_length(1500, new_annonce.cleaned_data["description"]) and check_length(14, new_annonce.cleaned_data["categorie"]):
                product = models.Produit.objects.create(
                    titre = new_annonce.cleaned_data["titre"],
                    categorie = new_annonce.cleaned_data["categorie"],
                    prix = new_annonce.cleaned_data["prix"],
                    photo1 = new_annonce.cleaned_data["photo1"],
                    description = new_annonce.cleaned_data["description"],
                    user_id=models.Utilisateur.objects.get(id__exact=request.session["userID"])
                )
                # optional photos 2..5: attach only when supplied and not
                # flagged for deletion
                for i in [2, 3, 4, 5]:
                    if new_annonce.cleaned_data["photo{0}".format(i)]!=None and new_annonce.cleaned_data["del_p{0}".format(i)]==0:
                        if i == 2:
                            product.photo2 = new_annonce.cleaned_data["photo2"]
                        elif i == 3:
                            product.photo3 = new_annonce.cleaned_data["photo3"]
                        elif i == 4:
                            product.photo4 = new_annonce.cleaned_data["photo4"]
                        else:
                            product.photo5 = new_annonce.cleaned_data["photo5"]
                product.save()
                return redirect("vendre_page")
    # remnant of the commented-out error handling, kept verbatim
    '''else:
    assert 1 == 2
    else:
    assert 1 == 2
    else:
    return redirect("connexion_page")
    except:
    infos = """ => ERROR <=
    => "titre" ou "prix" ou "categorie" ou "description" ne sont pas valides.
    => Hors Connexion.
    """
    dico = {"info_key": infos}
    return render(request, "error.html", dico)'''
    return redirect("accueil_page")
def modifier_annonce(request, ann_id):
    # Edit-listing form, pre-filled; only the owner of listing *ann_id* may edit.
    if "userID" in request.session:
        try:
            if models.Utilisateur.objects.get(id__exact=request.session["userID"]).verified:
                try:
                    ann = models.Produit.objects.get(id__exact=ann_id)
                    if ann.user_id.id == request.session["userID"]:
                        ann_form = forms.Modifier_Annonce()
                        # pre-fill the form with the listing's current values
                        ann_form.fields["titre"].initial = ann.titre
                        ann_form.fields["categorie"].initial = ann.categorie
                        ann_form.fields["prix"].initial = ann.prix
                        ann_form.fields["description"].initial = ann.description
                        search = forms.Search_Bar()
                        dico = {
                            "f_inputs": ann_form,
                            "ann_fields": ann,
                            "search_bar": search
                        }
                        return render(request, "modifier_annonce.html", dico)
                    return redirect('vendre_page')
                except:
                    return redirect('vendre_page')
            return redirect("verifier_page")
        except:
            return redirect("deconnexion_processus")
    # anonymous: come back here (with the listing id) after login
    global where_to_go
    where_to_go = "modifier_annonce_page"
    global args_to_go
    args_to_go = [ann_id]
    return redirect('connexion_page')
@require_POST
def modifier_annonce_process(request, ann_id=0):
    # Apply an edit-listing submission: update the text fields, then replace or
    # clear each of the optional photos 2..5 (del_pN == 1 means "clear").
    if "userID" in request.session:
        try:
            prod = models.Produit.objects.get(id__exact=ann_id)
            if prod.user_id.id == request.session["userID"]:
                modif_form = forms.Modifier_Annonce(request.POST, request.FILES)
                if modif_form.is_valid():
                    prod.titre = modif_form.cleaned_data["titre"]
                    prod.categorie = modif_form.cleaned_data["categorie"]
                    prod.prix = modif_form.cleaned_data["prix"]
                    prod.description = modif_form.cleaned_data["description"]
                    # debug trace of the uploaded photo fields
                    print("\n")
                    for i in [1, 2, 3, 4, 5]:
                        print("{0}".format(modif_form.cleaned_data["photo{0}".format(i)]))
                    print("\n")
                    # photo1 is mandatory on the model: only replace, never clear
                    if modif_form.cleaned_data["photo1"] != None:
                        prod.photo1 = modif_form.cleaned_data["photo1"]
                    for i in [2, 3, 4, 5]:
                        if modif_form.cleaned_data["del_p{0}".format(i)] == 1:
                            # user asked to remove this photo: point it at the
                            # placeholder image
                            if i == 2:
                                prod.photo2 = settings.MEDIA_ROOT + "/none_image.png"
                            elif i == 3:
                                prod.photo3 = settings.MEDIA_ROOT + "/none_image.png"
                            elif i == 4:
                                prod.photo4 = settings.MEDIA_ROOT + "/none_image.png"
                            else:
                                prod.photo5 = settings.MEDIA_ROOT + "/none_image.png"
                        else:
                            # keep or replace, depending on whether a new file
                            # was uploaded
                            if modif_form.cleaned_data["photo{0}".format(i)] != None:
                                if i == 2:
                                    prod.photo2 = modif_form.cleaned_data["photo2"]
                                elif i == 3:
                                    prod.photo3 = modif_form.cleaned_data["photo3"]
                                elif i == 4:
                                    prod.photo4 = modif_form.cleaned_data["photo4"]
                                else:
                                    prod.photo5 = modif_form.cleaned_data["photo5"]
                    prod.save()
                    return redirect("vendre_page")
                else:
                    return redirect("accueil_page")
            else:
                return redirect("vendre_page")
        except:
            return redirect("vendre_page")
    else:
        return redirect("connexion_page")
    # unreachable: every branch above returns
    return redirect("vendre_page")
def profile_page(request):
    # Account settings page: four pre-filled sub-forms (general info, address,
    # email change, password change) for the verified, logged-in user.
    if "userID" in request.session:
        try:
            if models.Utilisateur.objects.get(id__exact=request.session["userID"]).verified:
                user = models.Utilisateur.objects.get(id__exact=request.session["userID"])
                g_form = forms.Generale_Form()
                g_form.fields["nom"].initial = user.nom
                g_form.fields["prenom"].initial = user.prenom
                g_form.fields["tele"].initial = user.tele
                g_form.fields["sexe"].initial = user.sexe
                a_form = forms.Adresse_Form()
                a_form.fields["rue_immeuble"].initial = user.rue_immeuble
                a_form.fields["numero"].initial = user.numero
                a_form.fields["ville"].initial = user.ville
                a_form.fields["code_postal"].initial = user.code_postal
                a_form.fields["pays"].initial = user.pays
                e_form = forms.Email_Form()
                p_form = forms.MotDePasse_Form()
                search = forms.Search_Bar()
                dico = {
                    "generale_in": g_form,
                    "adresse_in": a_form,
                    "email_in": e_form,
                    "motdepasse_in": p_form,
                    "user_id_nbre": request.session["userID"],
                    "search_bar": search
                }
                return render(request, 'profile.html', dico)
            return redirect("verifier_page")
        except:
            return redirect("deconnexion_processus")
    # anonymous: come back here after login (shared global)
    global where_to_go
    where_to_go = "profile_page"
    return redirect("connexion_page")
@require_POST
def generale_processus(request):
    # Update the "general info" profile section.  The username field carries
    # "old/new" separated by '/'; the old half must match the current username.
    # Failed checks use ``assert 1 == 2`` to jump to the bare except below
    # (stripped under ``python -O``).
    new_infos = forms.Generale_Form(request.POST)
    try:
        if new_infos.is_valid():
            nom = new_infos.cleaned_data["nom"]
            prenom = new_infos.cleaned_data["prenom"]
            usernames = new_infos.cleaned_data["username"].split("/")
            tele = new_infos.cleaned_data["tele"]
            sexe = new_infos.cleaned_data["sexe"]
            if len(usernames) == 2:
                user = models.Utilisateur.objects.get(id__exact=request.session["userID"])
                if usernames[0] == user.username:
                    username_exp = r"[0-9a-zA-Z@\.+\-_]+"
                    if re.match(username_exp, usernames[1]):
                        if check_length(120, nom, prenom) and check_length(50, tele) and check_length(150, usernames[1]):
                            user.nom = nom
                            user.prenom = prenom
                            user.username = usernames[1]
                            user.tele = tele
                            user.sexe = sexe
                            user.save()
                            return redirect("profile_page")
                        else:
                            assert 1 == 2
                    else:
                        assert 1 == 2
                else:
                    assert 1 == 2
            else:
                assert 1 == 2
        else:
            assert 1 == 2
    except:
        infos = """ => ERROR <=
        => Invalide "nom d'utilisateur".
        => "nouveau" nom d'utilisateur doit contenir 150 caractères au maximum.
        => "nouveau" nom d'utilisateur doit contenir que des lettres, des chiffre, @, ., +, -, _.
        => "nom" et "prénom" doivent contenir 120 caractères au maximum.
        => Hors Connexion.
        """
        dico = {"info_key": infos}
        return render(request, "error.html", dico)
@require_POST
def adresse_processus(request):
    # Update the "address" profile section.  Failed checks jump to the bare
    # except via ``assert 1 == 2`` (stripped under ``python -O``).
    new_infos = forms.Adresse_Form(request.POST)
    try:
        if new_infos.is_valid():
            rue_immeuble = new_infos.cleaned_data["rue_immeuble"]
            numero = new_infos.cleaned_data["numero"]
            ville = new_infos.cleaned_data["ville"]
            code_postal = new_infos.cleaned_data["code_postal"]
            pays = new_infos.cleaned_data["pays"]
            if check_length(120, rue_immeuble, ville, pays) and numero>0 and code_postal>0:
                user = models.Utilisateur.objects.get(id__exact=request.session["userID"])
                user.rue_immeuble =rue_immeuble
                user.numero = numero
                user.ville = ville
                user.code_postal = code_postal
                user.pays = pays
                user.save()
                return redirect("profile_page")
            else:
                assert 1 == 2
        else:
            assert 1 == 2
    except:
        infos = """ => ERROR <=
        => "rue_immeuble" et "ville" et "pays" doivent contenir 120 caractères au maximum.
        => "numero" et "code_postal" doivent être supérieur à 0.
        => Hors Connexion.
        """
        dico = {"info_key": infos}
        return render(request, "error.html", dico)
@require_POST
def email_processus(request):
    # Change the account email: the old email must match, the two new emails
    # must agree; the account is then de-verified until re-confirmed.
    # Failed checks jump to the bare except via ``assert 1 == 2``.
    new_infos = forms.Email_Form(request.POST)
    try:
        if new_infos.is_valid():
            old_email = new_infos.cleaned_data["old_email"]
            new_email = new_infos.cleaned_data["new_email"]
            confirm_new_email = new_infos.cleaned_data["confirm_new_email"]
            if check_length(120, old_email, new_email, confirm_new_email):
                user = models.Utilisateur.objects.get(id__exact=request.session["userID"])
                if old_email==user.email and new_email==confirm_new_email:
                    user.email = new_email
                    # a changed email must be verified again
                    user.verified = False
                    user.save()
                    return redirect("verifier_page")
                else:
                    assert 1 == 2
            else:
                assert 1 == 2
        else:
            assert 1 == 2
    except:
        infos = """ => ERROR <=
        => "ancien email" n'est pas correcte.
        => "nouveau email" et "confirmer nouveau email" doivent être unique.
        => "nouveau email" et "confirmer nouveau email" doivent être les mêmes.
        => "nouveau email" et "confirmer nouveau email" doivent contenir 120 caractères au maximum.
        => Hors Connexion.
        """
        dico = {"info_key": infos}
        return render(request, "error.html", dico)
@require_POST
def password_processus(request):
    # Change the account password: old password must match, new passwords must
    # agree, all three in the 8..120 length range.
    # SECURITY NOTE(review): plaintext password comparison and storage.
    new_infos = forms.MotDePasse_Form(request.POST)
    try:
        if new_infos.is_valid():
            old_pass = new_infos.cleaned_data["old_password"]
            new_pass = new_infos.cleaned_data["new_password"]
            confirm_new_pass = new_infos.cleaned_data["confirm_new_password"]
            if 8<=len(old_pass)<=120 and 8<=len(new_pass)<=120 and 8<=len(confirm_new_pass)<=120:
                user = models.Utilisateur.objects.get(id__exact=request.session["userID"])
                if old_pass==user.password and new_pass==confirm_new_pass:
                    user.password = new_pass
                    user.save()
                    return redirect("profile_page")
                else:
                    assert 1 == 2
            else:
                assert 1 == 2
        else:
            assert 1 == 2
    except:
        infos = """ => ERROR <=
        => "ancien mot de passe" n'est pas correcte.
        => "nouveau mot de passe" et "confirmer mot de passe" doivent être unique.
        => "nouveau mot de passe" et "confirmer nouveau mot de passe" doivent être les mêmes.
        => "nouveau mot de passe" et "confirmer nouveau mot de passe" doivent contenir entre 8 et 120 caractères.
        => Hors Connexion.
        """
        dico = {"info_key": infos}
        return render(request, "error.html", dico)
def acheter(request, categorie=None):
    # Catalog page.  Three entry modes, by GET parameters:
    #   "categorie" -> filter form (category, sort order, price bounds)
    #   "bar"       -> free-text title search
    #   otherwise   -> optional URL category, or everything newest-first
    prods = None
    if "categorie" in request.GET:
        infos = forms.Acheter_Form(request.GET)
        if infos.is_valid():
            categorie = infos.cleaned_data["categorie"]
            trier_par = infos.cleaned_data["trier_par"]
            min_prix = infos.cleaned_data["min_prix"]
            max_prix = infos.cleaned_data["max_prix"]
            prods = models.Produit.objects.all().order_by("{0}".format(trier_par))
            if categorie != None:
                prods = prods.filter(categorie__exact=categorie)
            # apply whichever price bounds were supplied
            if min_prix==None and max_prix!=None:
                prods = prods.filter(prix__lte=max_prix)
            elif min_prix!=None and max_prix==None:
                prods = prods.filter(prix__gte=min_prix)
            elif min_prix!=None and max_prix!=None:
                prods = prods.filter(prix__gte=min_prix).filter(prix__lte=max_prix)
        # NOTE(review): if the form is invalid, ``prods`` stays None and
        # ``len(prods)`` below raises TypeError — confirm forms can't fail here
    elif "bar" in request.GET:
        infos = forms.Search_Bar(request.GET)
        if infos.is_valid():
            to_s = infos.cleaned_data["bar"]
            if len(to_s) == 0:
                prods = models.Produit.objects.all().order_by("-date_depot")
            else:
                # blank out every character not in this whitelist, then match
                # each remaining word against product titles (case-insensitive)
                not_touch = r"[0-9a-zA-Zçüéâàêèïîû ]"
                for i in range(0, len(to_s)):
                    if re.search(not_touch, to_s[i]) == None:
                        # NOTE(review): str.replace rewrites ALL occurrences of
                        # this character, not just position i
                        to_s = to_s.replace(to_s[i], ' ')
                to_s = to_s.split()
                prods = []
                all_prods = models.Produit.objects.all()
                for p in all_prods:
                    for s in to_s:
                        if p.titre.lower().find(s.lower()) != -1:
                            prods.append(p)
                            break
    elif categorie!=None and categorie in ["Téléphone", "Tablette", "Ordinateur", "Appareil Photo", "Télévision", "Autre"]:
        prods = models.Produit.objects.filter(categorie__exact=categorie).order_by("-date_depot")
    else:
        prods = models.Produit.objects.all().order_by("-date_depot")
    ach_form = forms.Acheter_Form()
    search = forms.Search_Bar()
    dico = {
        "infos": prods,
        "inputs": ach_form,
        "user_loged": False,
        "search_bar": search
    }
    if len(prods) == 0:
        dico["nothing"] = True
    else:
        dico["nothing"] = False
    if "userID" in request.session:
        dico["user_loged"] = True
    return render(request, "acheter.html", dico)
def produit(request, prod_id=0):
    """Product detail page; unknown or missing ids fall back to the catalog."""
    if prod_id == 0:
        return redirect("acheter_page")
    try:
        prod = models.Produit.objects.get(id__exact=prod_id)
    except:
        return redirect("acheter_page")
    context = {
        "infos": prod,
        "user_loged": "userID" in request.session,
        "search_bar": forms.Search_Bar()
    }
    return render(request, "produit.html", context)
def supprimer_produit(request, prod_id=0):
    # Delete listing *prod_id* if it belongs to the verified, logged-in user.
    if "userID" in request.session:
        try:
            if models.Utilisateur.objects.get(id__exact=request.session["userID"]).verified:
                if prod_id > 0:
                    try:
                        prod = models.Produit.objects.get(id__exact=prod_id)
                        if prod.user_id.id == request.session["userID"]:
                            prod.delete()
                            return redirect('vendre_page')
                        # listing exists but belongs to someone else
                        infos = """ => ERROR <=
                        => Invalid Utilisateur.
                        => Hors Connexion.
                        """
                        dico = {"info_key": infos}
                        return render(request, "error.html", dico)
                    except:
                        # listing lookup failed
                        infos = """ => ERROR <=
                        => Invalid Utilisateur.
                        => Hors Connexion.
                        """
                        dico = {"info_key": infos}
                        return render(request, "error.html", dico)
                # NOTE(review): invalid prod_id lands on "verifier_page" while
                # an unverified user gets the generic error below — this looks
                # inverted relative to the sibling views; confirm intent
                return redirect("verifier_page")
        except:
            return redirect("deconnexion_processus")
        infos = """ => ERROR <=
        => Invalid Utilisateur.
        => Hors Connexion
        """
        dico = {"info_key": infos}
        return render(request, "error.html", dico)
    return redirect("connexion_page")
def netoyer_produits():
    """Remove orphaned products (listings whose owner account was deleted)."""
    orphans = models.Produit.objects.filter(user_id__exact=None)
    for orphan in orphans:
        orphan.delete()
def affecte_achat(request, prod_id=0):
    # Purchase entry point: a valid product id leads to the payment
    # confirmation page; anonymous users are sent to login and routed back
    # here afterwards (via the shared globals).
    if "userID" in request.session:
        try:
            # existence check only; raises if the product id is unknown
            models.Produit.objects.get(id__exact=prod_id)
            dico = {"prod_id": prod_id}
            return render(request, "affecte_achat_process.html", dico)
        except:
            return redirect('acheter_page')
    global where_to_go
    where_to_go = "affecte_achat"
    global args_to_go
    args_to_go = [prod_id]
    return redirect("connexion_page")
def supprimer_compte(request, user_id=0):
    # Delete the logged-in user's own account (the URL id must match the
    # session id), then clean up their now-orphaned listings and log out.
    if "userID" in request.session:
        try:
            if models.Utilisateur.objects.get(id__exact=request.session["userID"]).verified:
                if user_id > 0:
                    if request.session["userID"] == user_id:
                        try:
                            user = models.Utilisateur.objects.get(id__exact=user_id)
                            user.delete()
                            netoyer_produits();
                            return redirect("deconnexion_processus")
                        except:
                            infos = """ => ERROR <=
                            => Invalid Utilisateur.
                            => Hors Connexion.
                            """
                            dico = {"info_key": infos}
                            return render(request, "error.html", dico)
                    # URL id does not match the logged-in account
                    infos = """ => ERROR <=
                    => Invalid Utilisateur.
                    => Hors Connexion.
                    """
                    dico = {"info_key": infos}
                    return render(request, "error.html", dico)
                # non-positive user_id
                infos = """ => ERROR <=
                => Invalid Utilisateur.
                => Hors Connexion.
                """
                dico = {"info_key": infos}
                return render(request, "error.html", dico)
            return redirect("verifier_page")
        except:
            return redirect("deconnexion_processus")
    return redirect("connexion_page")
def go_payment(request, prod_id=0):
    """Hand off to the PayPal payment flow for product *prod_id*.

    Bug fix: the original called ``render(reverse(...))`` — ``render`` takes
    the request and a template name, so passing it a URL string would fail at
    runtime.  A URL produced by ``reverse`` must be followed with ``redirect``.
    """
    if prod_id == 0:
        return redirect('acheter_page')
    elif not "userID" in request.session:
        return redirect('connexion_page')
    else:
        return redirect(reverse('paypal/process/', args=[prod_id]))
| othLah/Sell_Buy_Web_App | Vent_Achat_Proj/Vent_Achat_App/views.py | views.py | py | 27,137 | python | en | code | 0 | github-code | 13 |
70696504979 | import pyttsx3
from prettytable import PrettyTable
import pyfiglet
# Simple point-of-sale loop: read item/price pairs until 'q', then print a bill.
table = PrettyTable(["Item number", "Price"])
welcome = pyfiglet.figlet_format("WELCOME TO KIRANA STORE", font="digital")
print(welcome)

total = 0
while True:
    name = input("Enter the item\n")
    # 'q' to exit and print the table
    if name == 'q':
        break
    # guard against non-numeric prices instead of crashing on int()
    try:
        price = int(input('Enter the Price:'))
    except ValueError:
        print("Please enter a whole-number price.")
        continue
    # accumulate the running total and record the line item
    total += price
    table.add_row([name, price])

table.add_row(["______", "______"])
table.add_row(["TOTAL", total])
print(table)
19262944876 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.action_chains import ActionChains
import pandas as pd
import time
def devsleep(t):
    """Pause execution for *t* seconds (thin debug wrapper around time.sleep)."""
    time.sleep(t)
def navigateToGrafana(driver):
    # Open the Grafana URL and click through Chrome's self-signed-certificate
    # interstitial ("Details" -> "Proceed").
    # NOTE(review): the target URL is blank — presumably redacted; fill in
    # before running.
    driver.get("")
    time.sleep(1)
    secondary_button = driver.find_element(
        By.XPATH, '//*[@id="details-button"]')
    secondary_button.send_keys(Keys.ENTER)
    time.sleep(1)
    proceed_link = driver.find_element(By.XPATH, '//*[@id="proceed-link"]')
    proceed_link.send_keys(Keys.ENTER)
    return proceed_link
def login(driver):
    # Fill in the Grafana login form and submit it.
    # NOTE(review): the username/password send_keys values are empty strings —
    # presumably redacted credentials; supply real ones before running.
    print(driver)  # debug trace
    username = driver.find_element(
        By.XPATH, '//*[@id="reactRoot"]/div/main/div[3]/div/div[2]/div/div/form/div[1]/div[2]/div/div/input')
    username.send_keys('')
    password = driver.find_element(By.XPATH, '//*[@id="current-password"]')
    password.send_keys('')
    enter = driver.find_element(
        By.XPATH, '//*[@id="reactRoot"]/div/main/div[3]/div/div[2]/div/div/form/button')
    enter.send_keys(Keys.ENTER)
def grabfailedssh(driver):
    """Open the 'failed SSH' Grafana panel and return its text content.

    Returns the "no data" placeholder text when the panel is empty, otherwise
    the text of the results table.

    Bug fix: the original called ``failtext.to_list()`` on a plain string
    (``WebElement.text`` is a str), which would raise AttributeError; the text
    is now returned directly, matching both the empty-panel branch and the
    ``text_file.write(text)`` call in ``main``.
    """
    time.sleep(3)
    driver.get(
        "https://54.80.162.111/d/ir7x3Zq7z/default?orgId=1&viewPanel=3&from=now-7d&to=now")
    time.sleep(10)
    try:
        nofail = driver.find_element(
            By.XPATH, '/html/body/div[1]/div/main/div[3]/div/div/div[1]/div/div/div[1]/div/div/div[7]/div/section/div[2]/div/div')
        return nofail.text
    except:
        fail = driver.find_element(
            By.XPATH, '/html/body/div[1]/div/main/div[3]/div/div/div[1]/div/div/div[1]/div/div/div[7]/div/section/div[2]/div/div[1]/div/table')
        return fail.text
def dataframe(text):
    """Build and return a pandas DataFrame from *text*.

    Bug fix: the original created the DataFrame but neither returned nor
    stored it, so calling the function had no effect.
    """
    return pd.DataFrame(text)
def main():
    # Drive the whole scrape: start Chrome, log into Grafana, grab the
    # failed-SSH panel text and dump it to Output.txt.
    driver = webdriver.Chrome(
        executable_path=".\chrome_driver\chromedriver.exe")
    driver.implicitly_wait(0.5)
    navigateToGrafana(driver)
    login(driver)
    text = grabfailedssh(driver)
    text_file = open("Output.txt", "w")
    text_file.write(text)
    text_file.close()
    # NOTE(review): 3000 s (50 min) pause keeps the browser open afterwards —
    # confirm this is intentional before scheduling the script.
    devsleep(3000)


main()
# def navigateGrafana(driver):
# time.sleep(3)
# browse = driver.find_element(By.XPATH, '/html/body/div[1]/div/main/div[3]/div/div/div[1]/div/div/div[1]/div/div/div[3]/div/section/div[2]/div/div[1]/div[1]/ul/li/div/div/a')
# # //*[@id="react-aria3339806887-7"]
# # /html/body/div[1]/div/nav/ul[2]/li[2]/div/a
# time.sleep(1)
# browse.click()
# #time.sleep(6)
# # failedssh = driver.find_element(By.XPATH, '')
# # failedssh.click()
# # time.sleep(0.5)
# # browse.click()
# # time.sleep(1)
# # browse.send_keys(Keys.ARROW_RIGHT)
# # browse.send_keys(Keys.ARROW_DOWN)
# # action = ActionChains(browse)
# # action.send_keys(Keys.ARROW_RIGHT)
# # action.pause(0.5)
# # action.send_keys(Keys.ARROW_DOWN)
# # action.pause(0.5)
# # action.send_keys(Keys.ARROW_DOWN)
# # action.pause(0.5)
# # action.send_keys(Keys.ENTER)
# #action.perform()
| connorfryar/lab.python | Selenium_Example/selenium_example_anonymized.py | selenium_example_anonymized.py | py | 3,116 | python | en | code | 0 | github-code | 13 |
15886750082 | """
Routes and views for the flask application.
"""
from datetime import *
from calendar import monthrange
from flask import render_template, url_for, redirect, request, session, flash
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, current_user, login_user, logout_user, login_required, UserMixin
from flask_wtf import FlaskForm, form
from sqlalchemy.sql.elements import Null
from sqlalchemy.sql.expression import false, null, text
from wtforms import StringField, PasswordField, IntegerField, FormField, RadioField, validators
from wtforms.validators import InputRequired, Email, Length, Optional
from sqlalchemy import create_engine
from Palestra import app
from .models_finale import *
from werkzeug.security import generate_password_hash, check_password_hash
from . import db
import re
import numpy
#importa le URI per il database dal file __init__.py
from .__init__ import DB_URI, DB_URI_ISCRITTO, DB_URI_CAPO , DB_URI_ADMIN , DB_URI_ISTRUTTORE
# useful constants
nome_giorni_della_settimana=["Lunedì","Martedì","Mercoledì","Giovedì","Venerdì","Sabato","Domenica"]  # weekday names (Italian)
RUOLI = ["adminDB", "capo", "istruttore", "iscritto" ]  # application roles
mesi=["Gennaio","Febbraio","Marzo","Aprile","Maggio","Giugno","Luglio","Agosto","Settembre","Ottobre","Novembre","Dicembre"]  # month names (Italian)
# engines used to open database connections with role-specific credentials
# at the appropriate moment
engine = create_engine(DB_URI)
engine_iscritto = create_engine(DB_URI_ISCRITTO)
engine_capo = create_engine(DB_URI_CAPO)
engine_istruttore = create_engine(DB_URI_ISTRUTTORE)
engine_admin = create_engine(DB_URI_ADMIN)
# initialize the login-handling extension (Flask-Login)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
#funzione importante che serve affinché flask login tenga
#traccia dell'utente loggato, tramite il suo id di sessione
#segue l'esempio che c'è qui: https://flask-login.readthedocs.io/en/latest/#your-user-class
@login_manager.user_loader
def user_loader(id_utente):
    """Flask-Login callback: load the user whose session id is *id_utente*.

    Must return a user object, or ``None`` when the lookup fails.
    Bug fix: the failure branch returned SQLAlchemy's ``null`` construct
    (imported from ``sqlalchemy.sql.expression``) instead of Python ``None``;
    Flask-Login only treats ``None`` as "no user", so it now returns ``None``.
    """
    with engine.connect().execution_options(isolation_level="READ UNCOMMITTED") as conn:
        conn.begin()
        try :
            return Persone.query.filter_by(codice_fiscale = id_utente).first()
        except:
            flash("Errore")
            return None
        finally:
            conn.close()
#classe di wtforms che contiene il form di login
#scrivendo nell'html usando la sintassi di jinja2
#{{ form.email('class_='form-control') }} si otterrà
#come risultato il campo email da riempire. In pratica
#tu nella pagina html scrivi python, questo poi viene
#tradotto in html
class LoginForm(FlaskForm):
    """WTForms login form (email + password), rendered in templates via Jinja2
    (e.g. ``{{ form.email(class_='form-control') }}`` emits the input field)."""
    email = StringField('Email', validators = [InputRequired(), Email(message = 'Email non valida'), Length(max = 50)])
    password = PasswordField('Password', validators = [InputRequired(), Length(min = 8, max = 50)])
#stessa cosa di quello sopra, ma per la registrazione
class RegistrazioneForm(FlaskForm):
    """WTForms registration form: identity, credentials and contact details."""
    codice_fiscale = StringField('Codice fiscale', validators = [InputRequired(), Length(min = 2, max = 50)])
    nome = StringField('Nome', validators = [InputRequired(), Length(min = 3, max = 50)])
    cognome = StringField('Cognome', validators = [InputRequired(), Length(min = 3, max = 50)])
    # the registration date is captured at registration time, not entered here
    email = StringField('Email', validators = [InputRequired(), Email(message = 'Email non valida'), Length(max = 50)])
    password = PasswordField('Password', validators = [InputRequired(), Length(min = 1, max = 50)])
    # confirm-password field left commented out for now; remove if unwanted
    #chk_password = PasswordField('conferma password', validators = [InputRequired(), Length(min = 8, max = 50)])
    # contact options
    telefono = StringField('Telefono', validators = [InputRequired(), Length(max=11)])
    residenza = StringField('Luogo di residenza', validators = [InputRequired()])
    citta = StringField('Città di residenza', validators = [InputRequired()])
@app.route('/')
@app.route('/home')
def home():
    """Home page: per-month lesson counts, the busiest month(s), the
    average number of bookings per weekday and the course typologies."""
    with engine_iscritto.connect().execution_options(isolation_level="READ COMMITTED") as conn:
        totale_lezioni_svolte_al_mese = text("SELECT COUNT(*) AS numcorsi, CAST(date_part('month',data)as int) AS meseint FROM sale_corsi GROUP BY date_part('month',data) ")
        tipologie_corsi_query = text("SELECT distinct (nome_tipologia), descrizione FROM tipologie_corsi ")
        lista_tipologie_corsi = conn.execute(tipologie_corsi_query)
        #run the same query several times: each result cursor can only be consumed once
        tab_totale_lezioni_svolte_al_mese = conn.execute(totale_lezioni_svolte_al_mese)
        tab_totale_lezioni_svolte_al_mese_copia = conn.execute(totale_lezioni_svolte_al_mese)
        tab_totale_lezioni_svolte_al_mese_copia2 = conn.execute(totale_lezioni_svolte_al_mese)
        lista_num_corsi = []
        mesi_con_max_corsi = []
        max_corsi = 0
        # first pass: collect the per-month counts
        for row in tab_totale_lezioni_svolte_al_mese_copia:
            lista_num_corsi.append(row['numcorsi'])
        if lista_num_corsi:
            max_corsi = max(lista_num_corsi)
        # second pass: keep every month that reaches the maximum count
        for row in tab_totale_lezioni_svolte_al_mese_copia2:
            if row['numcorsi'] == max_corsi:
                mesi_con_max_corsi.append(row['meseint'])
        #average attendance for every day of the week
        cont_giorno_settimana = contaGiorni()
        s = text("SELECT * FROM vista_prenotazioni_settimana")
        num_prenotazioni_per_giorno_settimana = conn.execute(s)
        arr_medie = [0,0,0,0,0,0,0]
        #compute the averages
        # NOTE(review): each row of the view overwrites arr_medie, so only the
        # last row survives — presumably the view returns a single row; confirm.
        for row in num_prenotazioni_per_giorno_settimana:
            for i in range(0,len(arr_medie)):
                if cont_giorno_settimana[i] != 0 and row[nome_giorni_della_settimana[i].lower()] is not None :
                    arr_medie[i] = int(row[nome_giorni_della_settimana[i].lower()]) / int(cont_giorno_settimana[i])
                else:
                    arr_medie[i] = 0
    print("medie calcolate :")
    print(arr_medie)
    return render_template(
        'home.html',
        title='Home Page', nome_mesi = mesi, lezioni_al_mese = tab_totale_lezioni_svolte_al_mese, mesi_con_piu_corsi = mesi_con_max_corsi ,num_corsi = max_corsi, medie = arr_medie , nome_giorni_della_settimana = nome_giorni_della_settimana, lista_tipologie_corsi = lista_tipologie_corsi
    )
@app.route('/login', methods = ['GET','POST'])
def login():
    """Log a user in by email and password.

    On POST: look the user up by email, verify the hashed password, then
    log them in and redirect to the profile (or admin) page; on failure
    flash an error and reload the login page.
    """
    #build the form following the LoginForm class written above
    form = LoginForm()
    #read the submitted credentials (empty on a plain GET)
    ema = form.email.data
    pwd = form.password.data
    #check that there is a matching email
    if request.method == 'POST':
        utente = Persone.query.filter_by(email = ema).first()
        #if found, also verify the password (stored hashed)
        if utente is not None and utente.check_password(pwd):
            #all good: log the user in, adding them to the session,
            #and send them to the profile page
            login_user(utente)
            # NOTE(review): hard-coded plaintext admin credentials routing to
            # the admin page — confirm this shortcut is intentional.
            if ema== "admin@gmail.com" and pwd == "admin":
                return redirect(url_for('admin'))
            return redirect(url_for('profilo'))
        #else
        flash('Email o Password errati')
        return redirect('/login')
    return render_template('login.html', title = 'login', form = form)
@app.route('/registrazione', methods = ['GET','POST'])
def registrazione():
    """Register a new member (ruolo 3) together with their contact info.

    On POST: reject the registration when the codice fiscale is already
    present, otherwise insert a Persone row (hashed password) plus an
    InfoContatti row via the ORM session.
    """
    #based on the RegistrazioneForm class defined above
    form = RegistrazioneForm()
    #if form.validate_on_submit():
    if request.method == 'POST':
        #read the registration form fields
        codiceFisc = form.codice_fiscale.data
        nom = form.nome.data
        cogn = form.cognome.data
        em = form.email.data
        passwd = form.password.data
        #for the contacts object
        tel = form.telefono.data
        type_tel = request.form['menuContatti']
        resdz = form.residenza.data
        citt = form.citta.data
        # Duplicate check: ask the DB for this exact codice fiscale instead of
        # fetching every persone row and scanning them in Python.
        with engine_iscritto.connect().execution_options(isolation_level="READ COMMITTED")as conn:
            riga_esistente = conn.execute(
                text("SELECT codice_fiscale FROM persone WHERE codice_fiscale = :cf"),
                cf=codiceFisc,
            ).first()
        persona_gia_presente = riga_esistente is not None
        if persona_gia_presente == True :
            flash("Sei gia iscritto? controlla meglio il tuo codice fiscale ")
        else:
            #build the user object
            nuovo_utente = Persone( nome = nom,
                                    cognome = cogn,
                                    email = em ,
                                    data_iscrizione = datetime.today(),
                                    codice_fiscale = codiceFisc,
                                    residenza = resdz,
                                    citta = citt,
                                    ruolo = 3 # RUOLI : adminDB=0, capo=1, istruttore=2, iscritto=3
                                    )
            nuovo_utente.set_password(passwd)
            info_nuovo_utente = InfoContatti(
                                    telefono = tel,
                                    descrizione = type_tel,
                                    codice_fiscale = codiceFisc,
                                    )
            db.session.add(nuovo_utente)
            db.session.commit()
            db.session.add(info_nuovo_utente)
            db.session.commit()
            flash('Registrazione completata')
            return redirect('/login')
    return render_template('registrazione.html', title = 'registrazione', form = form)
@app.route('/profilo', methods = ['POST','GET'])
@login_required
def profilo():
    """Profile page; behaviour depends on the current user's role.

    iscritto / istruttore: profile deletion, course and weight-room
    bookings, booking cancellation and (istruttore only) course deletion,
    then renders the user's bookings and, for instructors, the courses
    they created.
    capo: toggles users between iscritto and istruttore and sees the full
    member/instructor list.
    """
    flash("Ecco il tuo profilo!")
    #fetch the current user's id and their personal details
    if current_user != None:
        # NOTE: 'id' shadows the builtin; kept as-is for compatibility.
        id = Persone.get_id(current_user)
        ruolo =RUOLI[Persone.get_role(current_user)]
        dati_utente_corrente = Persone.query.join(InfoContatti,Persone.codice_fiscale == InfoContatti.codice_fiscale)\
        .add_columns(Persone.codice_fiscale,InfoContatti.telefono, Persone.email , Persone.cognome, Persone.nome, Persone.data_iscrizione).filter_by(codice_fiscale = id).first()
        print(ruolo)
        if ruolo == "istruttore" or ruolo == "iscritto": #instructor or member
            # USER ACCOUNT DELETION
            if "autodistruzione" in request.form and request.form['autodistruzione'] == "Elimina Profilo":
                if ruolo == "iscritto":
                    elimina_prenotazioni = text("DELETE FROM prenotazioni WHERE codice_fiscale = :cf");
                    elimina_info_contatti = text("DELETE FROM info_contatti WHERE codice_fiscale = :cf")
                    elimina_persona = text("DELETE FROM persone WHERE codice_fiscale = :cf")
                    with engine_iscritto.connect().execution_options(isolation_level="SERIALIZABLE") as conn:
                        conn.execute(elimina_prenotazioni, cf=id)
                        conn.execute(elimina_info_contatti, cf=id)
                        conn.execute(elimina_persona, cf=id)
                        flash("profilo cancellato")
                        logout_user()
                        return render_template("/home.html")
            # course booking
            if "prenotaCorso" in request.form and request.form['prenotaCorso'] == "Prenotati":
                data_prenotata=request.form['dataPrenotata'].replace(" ", "-")
                id_sala=request.form['idSala']
                cf_utente=request.form['codiceFiscaleUtente']
                id_fascia=request.form['idFascia']
                #insert the seat for the course
                try:
                    q_insert_posto = text("INSERT INTO prenotazioni(data,codice_fiscale,id_sala,id_fascia, codice_prenotazione) VALUES(:d,:cf,:ids,:idf, :cod_prenotazione) ")
                    with engine_iscritto.connect().execution_options(isolation_level="REPEATABLE READ") as conn:
                        conn.execute(q_insert_posto,d=data_prenotata, cf=cf_utente, ids=id_sala, idf=id_fascia, cod_prenotazione = creaIDprenotazione())
                except:
                    raise
            #weight-room booking
            if "prenotaSalaPesi" in request.form and request.form['prenotaSalaPesi'] == "Prenotati":
                data_prenotata=request.form['dataPrenotata'].replace(" ", "-")
                id_sala=request.form['idSala']
                cf_utente=request.form['codiceFiscaleUtente']
                id_fascia=request.form['idFascia']
                #insert the seat for the weight room
                try:
                    q_insert_posto = text("INSERT INTO prenotazioni(data,codice_fiscale,id_sala,id_fascia, codice_prenotazione) VALUES(:d,:cf,:ids,:idf, :cod_prenotazione) ")
                    with engine_istruttore.connect().execution_options(isolation_level="REPEATABLE READ") as conn:
                        conn.execute(q_insert_posto,d=data_prenotata, cf=cf_utente, ids=id_sala, idf=id_fascia, cod_prenotazione = creaIDprenotazione())
                except:
                    #a DB trigger raises here when the room is already full
                    flash("AIA SONO FINITI I POSTI")
            #if the cancellation was confirmed, cancel the booking
            try:
                if "Conferma" in request.form and request.form['Conferma'] == "Conferma Cancellazione" and "id_prenotazione_key" in request.form :
                    #soft-delete the booking with value 3 = removed by a member
                    q_disabilita = text("UPDATE prenotazioni SET eliminata = 3 WHERE codice_prenotazione=:c ")
                    with engine_iscritto.connect().execution_options(isolation_level="REPEATABLE READ") as conn:
                        conn.execute(q_disabilita, c=request.form['id_prenotazione_key'])
            except:
                raise
            #DELETE THE COURSE
            if request.method == "POST" and "cancellaCorso" in request.form:
                try:
                    id_corso = request.form['id_corso_da_cancellare']
                    dataCorso = request.form['dataCorso']
                    id_fascia = request.form['id_fascia']
                    id_sala = request.form['id_sala']
                    with engine_istruttore.connect().execution_options(isolation_level="REPEATABLE READ") as conn:
                        #value 2 = booking removed because the course was deleted
                        s = text("UPDATE prenotazioni SET eliminata = 2 WHERE data = :data AND id_sala = :ids AND id_fascia = :idf ")
                        conn.execute(s, data = dataCorso, ids = id_sala, idf=id_fascia)
                        s = text("DELETE FROM sale_corsi WHERE id_corso = :idc AND id_sala = :ids AND data = :data ")
                        conn.execute(s,idc=id_corso,ids = id_sala , data = dataCorso )
                        s = text("DELETE FROM corsi WHERE id_corso = :idc")
                        conn.execute(s,idc=id_corso)
                except:
                    raise
            #bookings already made by this user
            q_lista_prenotazioni = text("SELECT p.data, p.id_sala, fs.id_fascia, p.codice_prenotazione, fs.inizio, fs.fine, "
                                        "CASE WHEN s.solo_attrezzi is TRUE THEN 'Pesi' "
                                        "WHEN s.solo_attrezzi is FALSE THEN 'Corso' END tipo_sala "
                                        "FROM prenotazioni p JOIN sale s ON p.id_sala = s.id_sala JOIN fascia_oraria fs ON p.id_fascia=fs.id_fascia WHERE p.codice_fiscale=:id_utente AND p.eliminata IS NULL" )
            with engine_iscritto.connect().execution_options(isolation_level="READ COMMITTED") as conn:
                tab_prenotazioni_effettuate = conn.execute(q_lista_prenotazioni,id_utente=id)
            if ruolo == "istruttore": # istruttore
                #courses created by this instructor
                q_corsi_creati = text("SELECT sc.data, c.id_corso, f.inizio , f.fine, c.nome_corso, tc.nome_tipologia, f.id_fascia, sc.id_sala "
                " FROM corsi c JOIN sale_corsi sc ON sc.id_corso=c.id_corso JOIN fascia_oraria f ON sc.id_fascia= f.id_fascia JOIN tipologie_corsi tc ON c.id_tipologia = tc.id_tipologia "
                " WHERE c.codice_fiscale_istruttore = :id_utente "
                " ORDER BY sc.data, f.inizio ASC")
                #READ UNCOMMITTED because a course can only be touched by its creator, not by another instructor or the capo
                with engine_istruttore.connect().execution_options(isolation_level="READ UNCOMMITTED") as conn:
                    tab_corsi_creati = conn.execute(q_corsi_creati,id_utente=id)
            if ruolo == "istruttore":
                return render_template("profilo.html",title="profilo", dati_utente = dati_utente_corrente, ruolo=ruolo, prenotazioni_effettuate=tab_prenotazioni_effettuate, tab_corsi_creati = tab_corsi_creati)
            if ruolo == "iscritto":
                return render_template("profilo.html",title="profilo", dati_utente = dati_utente_corrente, ruolo=ruolo, prenotazioni_effettuate=tab_prenotazioni_effettuate)
        if ruolo == "capo":
            # hide the gym-creation link once the gym has been set up
            if palestra_gia_creata() == True:
                mostra_link_creazione_palestra = "False";
            else:
                mostra_link_creazione_palestra = "True";
            #can upgrade a member to instructor and vice versa
            if 'modificavalori' in request.form and request.form['modificavalori'] == "ModificaPermessi":
                cf_passato = request.form['id_passato']
                nome_radio_button = cf_passato + "_radio"
                v = request.form[nome_radio_button]
                print(v)
                with engine_capo.connect().execution_options(isolation_level="REPEATABLE READ") as conn:
                    if v == "istruttore":
                        s = text("UPDATE persone SET ruolo = 2 WHERE codice_fiscale = :cf AND ruolo <> '2'")
                        conn.execute(s,cf=cf_passato)
                    elif v == "iscritto":
                        s = text("UPDATE persone SET ruolo = 3 WHERE codice_fiscale = :cf AND ruolo <> '3' " )
                        conn.execute(s,cf=cf_passato)
            #show every member and instructor
            with engine_capo.connect().execution_options(isolation_level="REPEATABLE READ") as conn:
                s = text("SELECT p.codice_fiscale, p.nome, p.cognome, i.telefono , p.ruolo FROM persone p JOIN info_contatti i ON p.codice_fiscale=i.codice_fiscale WHERE p.ruolo='3' OR p.ruolo='2' ORDER BY p.ruolo ")
                lista_persone = conn.execute(s)
            return render_template("profilo.html", title="profilo", lista_persone = lista_persone, dati_utente = dati_utente_corrente, ruolo=ruolo, mostra_link_creazione_palestra = mostra_link_creazione_palestra )
    else :
        return render_template("registrazione.html", title="registrazione")
@app.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the home page."""
    #remove the current user from the session
    logout_user()
    return redirect('/home')
@app.route('/corsi', methods = ['POST', 'GET'])
@login_required
def corsi():
    """Search/insert courses and weight-room slots for a selected date.

    Fix: @app.route must be the outermost decorator. In the original the
    order was inverted, so the function registered in Flask's URL map was
    the one NOT wrapped by @login_required and the endpoint was reachable
    without authentication.

    POST flow: parse the selected date; instructors may insert a course
    into the chosen time slots; a search ("Cerca") lists, per role,
    either the day's bookings (capo) or the free courses/weight rooms
    (iscritto/istruttore), honouring any active occupancy policy.
    """
    #check whether it is fine that only this part lives inside the POST branch
    if request.method == 'POST':
        data = request.form['dataSelezionata']
        tmp_data = data[2 : len(data) : ]
        data_for_DB = str(datetime.strptime(tmp_data,"%y %m %d")).split(' ')
        data_for_DB = data_for_DB[0]
        id_utente = Persone.get_id(current_user)
        ruolo = RUOLI[Persone.get_role(current_user)]
        intGiorno_settimana = data_to_giorno_settimana(data_for_DB)
        is_ricerca_setted = request.method == 'POST' and "ricerca" in request.form and request.form['ricerca'] == "Cerca"
        if "dataSelezionata" in request.form:
            if ruolo == "istruttore":
                #COURSE INSERTION
                if request.method == 'POST'and "inserimentoCorso" in request.form and request.form['inserimentoCorso'] == "Inserisci il corso":
                    nome_lista_delle_fasce = []
                    for namesID in request.form:
                        if re.match('nomeRadioIdFascia [0-9]*',namesID):
                            nome_lista_delle_fasce.append(namesID)
                    if nome_lista_delle_fasce:
                        for e in nome_lista_delle_fasce:
                            v = request.form[e]
                            try:
                                print(v)
                                id_tipologia_corso = request.form['tipologie']
                                nome_corso = request.form['nomeCorso']
                                nuovo_id_corso = creaIDcorso()
                                idfascia_e_id_sala = v.split(' ')
                                id_fascia = idfascia_e_id_sala[0].split('_')[1]
                                id_sala = idfascia_e_id_sala[1].split('_')[1]
                                print(id_fascia )
                                print(id_sala )
                                #insert the course and its room/slot row
                                with engine_istruttore.connect().execution_options(isolation_level="REPEATABLE READ") as conn :
                                    prep_query2 = text("INSERT INTO corsi( id_corso, nome_corso, codice_fiscale_istruttore , id_tipologia) VALUES( :idc , :nc ,:cfi, :idt)")
                                    conn.execute(prep_query2, idc = nuovo_id_corso , nc=nome_corso , cfi= id_utente, idt=id_tipologia_corso )
                                    prep_query = text("INSERT INTO sale_corsi(id_sala, id_corso, data, id_fascia) VALUES(:ids , :idc , :d , :idf)")
                                    conn.execute(prep_query, ids=id_sala, idc=nuovo_id_corso, d= data_for_DB, idf=id_fascia)
                            except:
                                flash("Aia! sembra che qualcuno ti abbia preceduto, rifai l'operazione")
                        else:
                            # for-else: runs only when no insertion raised
                            flash("inserimento riuscito!")
                    else :
                        flash("seleziona almeno una fascia oraria!")
        if is_ricerca_setted :
            if current_user != None and "ora_iniziale_ricerca" in request.form and "ora_finale_ricerca" in request.form:
                input_ora_inizio = request.form['ora_iniziale_ricerca']
                input_ora_fine = request.form['ora_finale_ricerca']
                if input_ora_inizio == '' or input_ora_fine == '':
                    flash("riempire i campi")
                    return render_template( 'corsi.html',title='Corsi Disponibili', data = data, ruolo = ruolo)
            if ruolo == 'capo':
                #list of bookings on the chosen day
                with engine_capo.connect().execution_options(isolation_level="REPEATABLE READ") as conn:
                    s = text("SELECT pr.id_sala , f.inizio, f.fine , pr.codice_fiscale , p.nome , p.cognome, i.telefono "
                            "FROM prenotazioni pr JOIN fascia_oraria f ON (f.id_fascia = pr.id_fascia) JOIN persone p ON (p.codice_fiscale = pr.codice_fiscale) JOIN info_contatti i ON (i.codice_fiscale = pr.codice_fiscale) "
                            "WHERE f.inizio >= :oraInizio AND f.fine <= :oraFine AND f.giorno = :intGiorno AND pr.data = :input_data "
                            )
                    tab_lista_prenotazioni = conn.execute(s, oraInizio=input_ora_inizio , oraFine = input_ora_fine ,intGiorno = intGiorno_settimana, input_data = data_for_DB )
                return render_template( 'corsi.html',title='Corsi Disponibili', data = data, ruolo = ruolo, tab_lista_prenotazioni = tab_lista_prenotazioni )
            if ruolo == "iscritto" or ruolo == "istruttore" : #search available courses
                with engine_iscritto.connect().execution_options(isolation_level="REPEATABLE READ") as conn:
                    q_lista_tipologie = text("SELECT id_tipologia, nome_tipologia FROM tipologie_corsi ")
                    lista_tipologie_tab = conn.execute(q_lista_tipologie)
                    # free courses, full room capacity
                    s = text("SELECT sc.id_fascia,f.inizio,f.fine, sc.id_sala,tc.nome_tipologia, pi.nome AS nome_istruttore, pi.cognome AS cognome_istruttore "
                            "FROM sale_corsi sc JOIN fascia_oraria f ON sc.id_fascia=f.id_fascia "
                            "JOIN sale s ON sc.id_sala= s.id_sala JOIN corsi co ON co.id_corso=sc.id_corso JOIN persone pi ON (pi.codice_fiscale =co.codice_fiscale_istruttore AND co.codice_fiscale_istruttore <> :cf ) JOIN tipologie_corsi tc ON co.id_tipologia=tc.id_tipologia "
                            "WHERE f.inizio >= :oraInizio AND f.fine <= :oraFine AND f.giorno = :intGiorno AND sc.data = :input_data "
                            "AND s.posti_totali > (SELECT Count(*)AS numPrenotati "
                            "FROM prenotazioni pr JOIN sale_corsi sc1 ON (sc1.id_sala=sc.id_sala AND pr.id_sala= sc.id_sala) "
                            "JOIN fascia_oraria f1 ON f1.id_fascia=f.id_fascia "
                            "WHERE pr.data = :input_data "
                            "AND pr.eliminata IS NULL) "
                            "AND f.id_fascia NOT IN (SELECT id_fascia FROM prenotazioni WHERE data = :input_data AND codice_fiscale = :cf AND eliminata IS NULL) "
                            )
                    percentuale = policy_presenti(data_for_DB)
                    # same query but capped at the policy's capacity percentage
                    s2 = text("SELECT sc.id_fascia,f.inizio,f.fine, sc.id_sala,tc.nome_tipologia, pi.nome AS nome_istruttore, pi.cognome AS cognome_istruttore "
                            "FROM sale_corsi sc JOIN fascia_oraria f ON sc.id_fascia=f.id_fascia "
                            "JOIN sale s ON sc.id_sala= s.id_sala JOIN corsi co ON co.id_corso=sc.id_corso JOIN persone pi ON (pi.codice_fiscale =co.codice_fiscale_istruttore AND co.codice_fiscale_istruttore <> :cf ) JOIN tipologie_corsi tc ON co.id_tipologia=tc.id_tipologia "
                            "WHERE f.inizio >= :oraInizio AND f.fine <= :oraFine AND f.giorno = :intGiorno AND sc.data = :input_data "
                            "AND s.posti_totali * :percentuale /100 > (SELECT Count(*) AS numPrenotati "
                            "FROM prenotazioni pr JOIN sale_corsi sc1 ON (sc1.id_sala=sc.id_sala AND pr.id_sala= sc.id_sala) "
                            "JOIN fascia_oraria f1 ON f1.id_fascia=f.id_fascia "
                            "WHERE pr.data = :input_data "
                            "AND pr.eliminata IS NULL) "
                            "AND f.id_fascia NOT IN (SELECT id_fascia FROM prenotazioni WHERE data = :input_data AND codice_fiscale = :cf AND eliminata IS NULL) "
                            )
                    # weight rooms with free seats the user has not already booked
                    q_sale_pesi_libere = text(
                            "SELECT s1.id_sala, f1.id_fascia , f1.inizio, f1.fine "
                            "FROM sale s1 JOIN fascia_oraria f1 ON (f1.giorno = :intGiorno AND f1.inizio >= :oraInizio AND f1.fine <= :oraFine) "
                            "WHERE s1.solo_attrezzi IS TRUE "
                            "AND s1.posti_totali > (SELECT count(*) "
                            "FROM prenotazioni p JOIN sale s ON p.id_sala = s1.id_sala "
                            "WHERE p.data= :input_data AND s.solo_attrezzi IS TRUE AND p.eliminata IS NULL AND p.id_fascia = f1.id_fascia )"
                            "AND f1.id_fascia NOT IN (SELECT p.id_fascia "
                            "FROM prenotazioni p "
                            "WHERE p.data= :input_data AND p.codice_fiscale = :cf AND p.eliminata IS NULL) "
                            "AND f1.id_fascia NOT IN (SELECT id_fascia "
                            "FROM sale_corsi sc JOIN corsi c ON (sc.id_corso= c.id_corso ) "
                            "WHERE c.codice_fiscale_istruttore = :cf AND sc.data = :input_data ) "
                            )
                try:
                    with engine_iscritto.connect().execution_options(isolation_level="REPEATABLE READ") as conn:
                        if percentuale is not False:
                            corsi_liberi = conn.execute(s2, oraInizio=input_ora_inizio , oraFine = input_ora_fine ,intGiorno = intGiorno_settimana, input_data = data_for_DB, cf= id_utente, percentuale=percentuale )
                        else :
                            corsi_liberi = conn.execute(s, oraInizio=input_ora_inizio , oraFine = input_ora_fine ,intGiorno = intGiorno_settimana, input_data = data_for_DB, cf= id_utente )
                        sale_pesi_libere = conn.execute(q_sale_pesi_libere, oraInizio=input_ora_inizio , oraFine = input_ora_fine ,intGiorno = intGiorno_settimana, input_data = data_for_DB,cf= id_utente )
                except:
                    raise
                if ruolo == "istruttore":
                    # rooms still free in the searched slots (for new courses)
                    q_sale_libere = text(
                            "SELECT s.id_sala , f1.inizio ,f1.fine ,f1.id_fascia, s.posti_totali "
                            "FROM sale s JOIN fascia_oraria f1 ON ( f1.inizio >= :oraInizio AND f1.fine <= :oraFine AND f1.giorno = :g ) "
                            "WHERE s.id_sala NOT IN (SELECT sc.id_sala FROM sale_corsi sc JOIN fascia_oraria f ON sc.id_fascia = f.id_fascia WHERE f1.id_fascia = f.id_fascia AND sc.data = :dataDB) "
                            "AND s.solo_attrezzi IS FALSE "
                            "GROUP BY s.id_sala, f1.id_fascia "
                            "ORDER BY f1.id_fascia "
                            )
                    try:
                        with engine_istruttore.connect().execution_options(isolation_level="REPEATABLE READ") as conn:
                            sale_disp_con_fasce = conn.execute(q_sale_libere, dataDB=data_for_DB, oraInizio = input_ora_inizio, oraFine = input_ora_fine , g= intGiorno_settimana)
                    except:
                        raise
                    return render_template( 'corsi.html',title='Corsi Disponibili', data = data, ruolo = ruolo, sale_disp_con_fasce =sale_disp_con_fasce , info_corsi =corsi_liberi, lista_tipologie_tab = lista_tipologie_tab,cf_utente = id_utente, sale_pesi_libere = sale_pesi_libere )
                return render_template( 'corsi.html',title='Corsi Disponibili', data = data, ruolo = ruolo, info_corsi =corsi_liberi, lista_tipologie_tab = lista_tipologie_tab,cf_utente = id_utente, sale_pesi_libere = sale_pesi_libere )
        else:
            return render_template( 'corsi.html',title='Corsi Disponibili', data = data)
    else:
        return redirect(url_for("home"))
@app.route('/istruttori')
@login_required
def istruttori():
    """Show name and phone number of every instructor (ruolo = 2)."""
    query_istruttori = text("SELECT p.nome,p.cognome,i.telefono FROM persone p JOIN info_contatti i ON p.codice_fiscale=i.codice_fiscale WHERE p.ruolo=2")
    with engine_istruttore.connect().execution_options(isolation_level="READ UNCOMMITTED") as conn:
        righe = conn.execute(query_istruttori)
    return render_template('istruttori.html',title='Elenco istruttori',lista_istruttori = righe )
@app.route('/creazionePalestra',methods=['POST', 'GET'])
@login_required
def creazionePalestra():
    """Gym setup page: add course typologies and the weekly time slots.

    Two POST actions: "AggiungiTipoCorso" inserts a new course typology
    when not already present; "inviaFasce" scans the posted field names
    for paired "<day>_inizioFascia_<n>" / "<day>_fineFascia_<n>" keys and
    inserts the corresponding fascia_oraria rows.
    """
    tipologie_presenti = []
    if "AggiungiTipoCorso" in request.form and request.form['AggiungiTipoCorso'] is not None and "nomeTipologiaCorso" in request.form and request.form['nomeTipologiaCorso'] is not None :
        nome_tipo = request.form['nomeTipologiaCorso']
        descrizione = request.form['descrizioneTipologiaCorso']
        #typologies already inserted
        with engine_capo.connect().execution_options(isolation_level="REPEATABLE READ") as conn:
            res = conn.execute(" SELECT nome_tipologia FROM tipologie_corsi")
            tipologie_presenti = []
            for row in res:
                tipologie_presenti.append(row['nome_tipologia'])
            #insert the typology only when it is not there yet
            if nome_tipo not in tipologie_presenti:
                s = text("INSERT INTO tipologie_corsi(id_tipologia,nome_tipologia,descrizione) VALUES( :id, :n, :d )")
                conn.execute(s, id=creaIDtipologiaCorso(), n=nome_tipo , d=descrizione )
                flash("inserimento della tipologia riuscito")
            else:
                flash('la tipologia è gia presente')
    if "inviaFasce" in request.form:
        copia_POST_array = numpy.array(list(request.form))
        for i in range(len(copia_POST_array)-1):
            # a slot is a consecutive pair of inizio/fine field names
            if re.match("[1-7]_inizioFascia_[1-9]", str(copia_POST_array[i]) ) and re.match("[1-7]_fineFascia_[1-9]", str(copia_POST_array[i+1]) ) :
                s_fasciaInizio = str(copia_POST_array[i])
                s_fasciaFine = str(copia_POST_array[i+1])
                args_fascia_inizio = s_fasciaInizio.split('_')
                args_fascia_fine = s_fasciaFine.split('_')
                intGiorno = args_fascia_inizio[0]
                numFascia = args_fascia_inizio[2]
                ora_inizio = request.form[intGiorno + "_" + args_fascia_inizio[1] + "_" + numFascia]
                ora_fine = request.form[intGiorno + "_" + args_fascia_fine[1] + "_" + numFascia]
                print(ora_inizio)
                print(ora_fine)
                #READ UNCOMMITTED: the slots are only inserted once, by the first capo
                with engine_capo.connect().execution_options(isolation_level="READ UNCOMMITTED") as conn:
                    s = text("INSERT INTO Fascia_oraria(id_fascia, giorno, inizio, fine) VALUES (:id, :g, :ora_i, :ora_f)" )
                    conn.execute(s,id=i, g =intGiorno, ora_i=ora_inizio, ora_f= ora_fine )
    with engine_capo.connect().execution_options(isolation_level="REPEATABLE READ") as conn:
        s = text("SELECT nome_tipologia FROM tipologie_corsi" )
        el_tipologie = conn.execute(s)
    return render_template('creazionePalestra.html',title='Crea La Palestra',nome_giorni_della_settimana = nome_giorni_della_settimana, tipologie_corsi =tipologie_presenti, el_tipologie = el_tipologie)
@app.route('/calendario', methods=['POST', 'GET'])
@login_required
def calendario():
    """Render the month calendar, with POST-driven month navigation.

    Fix: on POST the base year is read from 'annoCorrenteSelezionato'
    (the year the user is currently viewing) instead of today's year;
    the original only consulted the form on a December/January wrap, so
    stepping one month while viewing another year snapped the view back
    to the current year.
    """
    if current_user != None:
        ruolo = Persone.get_role(current_user)
        nome_giorni_della_settimana=["Lunedì","Martedì","Mercoledì","Giovedì","Venerdì","Sabato","Domenica"]
        data_corrente = datetime.today()
        anno = data_corrente.year
        mese = data_corrente.month
        data_corrente = {"anno" : anno , "mese" : mese , "giorno" : data_corrente.day } # year month day
        if request.method == 'POST':
            # start from the year shown in the page, not today's year
            anno = int(request.form['annoCorrenteSelezionato'])
            if request.form['cambiaMese'] == '<-':
                mese = int(request.form['meseCorrenteSelezionato'])-1
                if mese == 0:
                    anno = anno-1
                    mese = 12
            if request.form['cambiaMese'] == '->':
                mese = int(request.form['meseCorrenteSelezionato'])+1
                if mese == 13:
                    anno = anno+1
                    mese = 1
        primo_giorno_indice = datetime(anno,mese,1).weekday()
        primo_giorno_nome = nome_giorni_della_settimana[primo_giorno_indice]
        num_giorni = monthrange(anno, mese)[1]
        return render_template('calendario.html',title='calendario',
                meseNumerico=mese, num_giorni=num_giorni, nomeMese=mesi[mese-1],annoNumerico = anno,
                dataCorrente = data_corrente,primo_giorno_nome = primo_giorno_nome, nome_giorni_settimana=nome_giorni_della_settimana,
                indice_settimana_del_primo_giorno = primo_giorno_indice,
                ruolo = ruolo)
@app.route('/admin', methods=['POST', 'GET'])
@login_required
def admin():
    """Admin page: create a 'capo' account (ruolo = 1) plus its contact row."""
    if request.method == 'POST':
        nome = request.form['nome']
        cf = request.form['cf']
        cognome = request.form['cognome']
        email = request.form['email']
        pwd = request.form['psw']
        cell = request.form['cell']
        tipoCell = request.form['cellAdmin']
        residenza = request.form['residenza']
        citta = request.form['citta']
        try:
            with engine_admin.connect().execution_options(isolation_level="SERIALIZABLE") as conn:
                # password is stored hashed (sha256 + salt), never in clear
                s = text("INSERT INTO persone(codice_fiscale,nome,cognome,email,data_iscrizione,password,citta,residenza ,ruolo) VALUES (:codice_fiscale, :nome, :cognome, :email, :data_iscrizione, :password,:citta,:res,1)")
                conn.execute(s,codice_fiscale=cf, nome=nome, cognome=cognome,email=email, data_iscrizione = datetime.today(),password=generate_password_hash(pwd, method = 'sha256', salt_length = 8),citta=citta,res=residenza)
                s = text("INSERT INTO info_contatti(codice_fiscale,telefono,descrizione) VALUES (:codice_fiscale,:cellulare,:descrizione)")
                conn.execute(s,codice_fiscale=cf,cellulare=cell, descrizione=tipoCell)
                flash("inserimento")
        except:
            flash("errore nell'inserimento")
            raise
    return render_template("admin.html" , title='Amministrazione')
#WTForms form used by /crea_sala to create a new room
class CreaSalaForm(FlaskForm):
    """Form for creating a room: seat count and attrezzi-only flag."""
    # NOTE(review): Length() validates len() of the field data, but nPosti is
    # an IntegerField (int data) — this likely fails when the validator runs;
    # NumberRange would be the matching validator. Confirm before changing.
    nPosti = IntegerField('Numero posti totali', validators = [InputRequired(), Length(min = 1, max = 3)])
    attrezzi = RadioField('Seleziona se contiene solo attrezzi', choices=[('True','SI'),('False','NO')])
@app.route('/crea_sala', methods=['POST', 'GET'])
@login_required
def crea_sala():
    """Create a new room (sala).

    GET shows the form together with the id the new room will get;
    POST ("Invia") inserts a Sale row built from the form data.

    Fixes: the radio value is compared against 'True' instead of being
    eval()-ed (eval on request-derived data is unsafe); request.form.get
    avoids a KeyError on a POST without the Submit field; id_next_sala is
    computed once unconditionally so it is always bound at render time.
    """
    id_next_sala = creaIDsala()
    form = CreaSalaForm()
    if request.method == 'POST' and request.form.get('Submit') == "Invia":
        posti = form.nPosti.data
        # the radio widget only ever submits 'True' or 'False'
        attrez = form.attrezzi.data == 'True'
        nuova_sala = Sale(
            id_sala = id_next_sala,
            posti_totali = posti,
            solo_attrezzi = attrez
        )
        db.session.add(nuova_sala)
        db.session.commit()
        flash('Creazione completata')
    return render_template("creazioneSala.html", title = "Crea una nuova sala nella tua palestra", form = form, id_sala = id_next_sala)
@app.route('/policy_occupazione', methods=['POST', 'GET'])
@login_required
def policy_occupazione():
    """Manage occupancy policies (capacity percentage over a date range)."""
    #update of an edited policy
    if "confermaModifica" in request.form and "dataInizioModificata" in request.form and "dataFineModificata" in request.form and "percModificata" in request.form and "id_policy" in request.form:
        with engine_capo.connect().execution_options(isolation_level="SERIALIZABLE") as conn:
            s = text("UPDATE policy_occupazione SET data_inizio=:inizio , data_fine = :fine , percentuale_occupabilità = :perc WHERE id_policy=:id")
            conn.execute(s, inizio=request.form['dataInizioModificata'],fine=request.form['dataFineModificata'], perc = request.form['percModificata'], id=request.form['id_policy'] )
    #insertion of a new policy
    if request.method== 'POST' and 'confermaPolicy' in request.form:
        input_data_inizio = request.form['dpcm-start']
        input_data_fine = request.form['dpcm-end']
        perc = request.form['perc']
        with engine_capo.connect().execution_options(isolation_level="SERIALIZABLE") as conn:
            # find existing policies whose date range overlaps the new one,
            # or reject ranges that start/end in the past
            s = text("SELECT id_policy "
                    "FROM policy_occupazione p "
                    "WHERE p.id_policy in ( SELECT p2.id_policy "
                    "FROM policy_occupazione p2 "
                    "WHERE p2.id_policy = p.id_policy "
                    "AND "
                    "( "
                    "( :di BETWEEN p2.data_inizio and p2.data_fine OR :df BETWEEN p2.data_inizio and p2.data_fine) "
                    "OR :di < now() "
                    "OR :df < now() "
                    "OR (:di <= p2.data_inizio AND :df >= p2.data_fine) "
                    ") "
                    ")"
                    )
            lista_policy_in_contrasto = conn.execute(s, di=input_data_inizio , df=input_data_fine)
            errore = "ok"
            for row in lista_policy_in_contrasto:
                # NOTE(review): 'null' is presumably sqlalchemy's null();
                # any returned row effectively marks a conflict — confirm.
                if row['id_policy'] != null:
                    errore = "Controlla meglio le date"
            if errore != "ok":
                flash(errore)
            else:
                inserimento_policy = text("INSERT INTO policy_occupazione(data_inizio,data_fine, percentuale_occupabilità) VALUES(:i , :f, :p) ")
                with engine_capo.connect().execution_options(isolation_level="SERIALIZABLE") as conn:
                    conn.execute(inserimento_policy, i=input_data_inizio, f=input_data_fine, p=perc)
                flash("inserimento riuscito")
    #show all the policies
    tutte_le_policy = text("SELECT * FROM policy_occupazione ")
    with engine_capo.connect().execution_options(isolation_level="REPEATABLE READ") as conn:
        policies = conn.execute(tutte_le_policy)
    return render_template("policyOccupazione.html", title = "Occupazione", policies = policies )
@app.route('/corsi/lista', methods=['POST', 'GET'])
@login_required
def lista_corsi():
    """List every course with its typology, instructor and contact phone."""
    # TODO: page still to be completed
    query_corsi = text(
        "SELECT distinct c.nome_corso, p.nome, p.cognome,t.nome_tipologia, t.descrizione , i.telefono "
        "FROM corsi c JOIN persone p ON (p.codice_fiscale = c.codice_fiscale_istruttore AND p.ruolo = 2) "
        "JOIN tipologie_corsi t ON t.id_tipologia = c.id_tipologia "
        "JOIN info_contatti i ON c.codice_fiscale_istruttore = i.codice_fiscale "
        "GROUP BY c.nome_corso, c.codice_fiscale_istruttore , p.nome, p.cognome ,t.nome_tipologia, t.descrizione, i.telefono ;"
    )
    with engine_iscritto.connect() as conn:
        righe_corsi = conn.execute(query_corsi)
    return render_template('lista_corsi.html', tab_lista_corsi = righe_corsi)
@app.route('/lista_prenotazioni', methods=['POST', 'GET'])
@login_required
def lista_prenotazioni():
    """Show all bookings: weight-room, course and (soft-)deleted ones."""
    if current_user != None:
        # active weight-room bookings
        query_lista_prenotazioni_sale_pesi = text("SELECT pr.data, pr.codice_fiscale, pr.codice_prenotazione , pr.id_sala, pe.nome , pe.cognome , pe.email , f.inizio, f.fine , i.telefono "
                "FROM prenotazioni pr JOIN persone pe ON pr.codice_fiscale = pe.codice_fiscale "
                "JOIN info_contatti i ON i.codice_fiscale = pr.codice_fiscale "
                "JOIN fascia_oraria f ON f.id_fascia = pr.id_fascia "
                "JOIN sale s ON pr.id_sala = s.id_sala "
                "WHERE pr.eliminata IS NULL ORDER BY pr.data "
                )
        # active course bookings, with course name and typology
        query_lista_prenotazioni_sale_corsi = text("SELECT pr.data, pr.codice_fiscale, pr.codice_prenotazione , pr.id_sala, pe.nome , pe.cognome , pe.email , f.inizio, f.fine , i.telefono , c.nome_corso , t.nome_tipologia "
                "FROM prenotazioni pr JOIN persone pe ON pr.codice_fiscale = pe.codice_fiscale JOIN info_contatti i ON i.codice_fiscale = pr.codice_fiscale "
                "JOIN fascia_oraria f ON f.id_fascia = pr.id_fascia "
                "JOIN sale_corsi s ON pr.id_sala = s.id_sala "
                "JOIN corsi c ON s.id_corso = c.id_corso "
                "JOIN tipologie_corsi t ON c.id_tipologia = t.id_tipologia "
                "WHERE pr.eliminata IS NULL ORDER BY pr.data "
                )
        # bookings that were soft-deleted (eliminata set)
        query_prenotazioni_eliminate = text("SELECT pr.data, pr.codice_fiscale, pr.codice_prenotazione , pr.id_sala, pe.nome , pe.cognome , pe.email , f.inizio, f.fine, i.telefono "
                "FROM prenotazioni pr JOIN persone pe ON pr.codice_fiscale = pe.codice_fiscale JOIN info_contatti i ON i.codice_fiscale = pr.codice_fiscale "
                "JOIN fascia_oraria f ON f.id_fascia = pr.id_fascia "
                "WHERE pr.eliminata IS NOT NULL ORDER BY pr.data "
                )
        with engine_capo.connect().execution_options(isolation_level="READ COMMITTED") as conn:
            tab_lista_prenotazioni_sale_pesi = conn.execute(query_lista_prenotazioni_sale_pesi)
            tab_lista_prenotazioni_eliminate = conn.execute(query_prenotazioni_eliminate)
            tab_lista_prenotazioni_sale_corsi = conn.execute(query_lista_prenotazioni_sale_corsi)
        return render_template("lista_prenotazioni.html", tab_lista_prenotazioni_sale_pesi = tab_lista_prenotazioni_sale_pesi , tab_lista_prenotazioni_eliminate = tab_lista_prenotazioni_eliminate, tab_lista_prenotazioni_sale_corsi = tab_lista_prenotazioni_sale_corsi)
    return render_template("lista_prenotazioni.html")
#-------------------UTILI--------------
def creaIDsala():
    """Compute the next room id as COUNT(id_sala) + 1.

    NOTE(review): count-based ids collide once rows are deleted; this
    assumes rooms are never removed -- confirm against the delete paths.
    """
    try:
        with engine_iscritto.connect().execution_options(isolation_level="SERIALIZABLE") as conn:
            s = "SELECT COUNT(id_sala) AS num_sala FROM SALE "
            res = conn.execute(s)
            # The aggregate query yields exactly one row; read it and stop.
            for row in res:
                num_sala = row['num_sala']
                break
            next_id = int(num_sala) + 1
            return next_id
    except:
        # Flash a generic "reload the page" message, then propagate the error.
        flash("errore ricarica la pagina")
        raise
def data_to_giorno_settimana(dataString):
    """Map a 'YYYY-MM-DD' string to a 1-based weekday (Monday=1 ... Sunday=7)."""
    year, month, day = dataString.split('-')
    parsed = date(int(year), int(month), int(day))
    # date.weekday() is 0-based (Monday=0); shift to the 1-based convention.
    return parsed.weekday() + 1
def creaIDcorso():
    """Compute the next course id as COUNT(id_corso) + 1.

    NOTE(review): count-based ids collide after deletions; an SQL sequence
    would be safer -- kept as-is to preserve behaviour.
    """
    try:
        with engine_iscritto.connect().execution_options(isolation_level="SERIALIZABLE") as conn:
            s = "SELECT COUNT(id_corso) AS num_corso FROM corsi "
            res = conn.execute(s)
            # Single aggregate row.
            for row in res:
                num_corso = row['num_corso']
                break
            next_id = int(num_corso) + 1
            return next_id
    except Exception:
        flash("errore ricarica la pagina")
        # BUG FIX: the error was swallowed and the function implicitly
        # returned None; re-raise for consistency with creaIDsala.
        raise
def creaIDtipologiaCorso():
    """Compute the next course-type id as COUNT(*) + 1.

    NOTE(review): count-based ids collide after deletions; kept as-is to
    preserve behaviour.
    """
    try:
        with engine_capo.connect().execution_options(isolation_level="SERIALIZABLE") as conn:
            s = "SELECT COUNT(*) AS num_tipologie FROM tipologie_corsi "
            res = conn.execute(s)
            # Single aggregate row.
            for row in res:
                num_tipologie = row['num_tipologie']
                break
            next_id = int(num_tipologie) + 1
            return next_id
    except Exception:
        flash("errore ricarica la pagina")
        # BUG FIX: the error was swallowed and the function implicitly
        # returned None; re-raise for consistency with creaIDsala.
        raise
def creaIDprenotazione():
    """Compute the next booking id as COUNT(*) + 1.

    Unlike creaIDsala/creaIDcorso there is no try/except guard here: a
    database failure propagates directly to the caller.
    """
    with engine_iscritto.connect().execution_options(isolation_level="SERIALIZABLE") as conn:
        s = "SELECT COUNT(*) AS num_prenotazioni FROM prenotazioni"
        res = conn.execute(s)
        # Single aggregate row.
        for row in res:
            num_prenotazioni = row['num_prenotazioni']
            break
        next_id = int(num_prenotazioni) + 1
        return next_id
def contaGiorni():
    """Count how many times each weekday occurred between the gym's creation
    date and today.

    Returns a list of 7 counters indexed by date.weekday() (Monday=0).
    """
    num_gs = [0,0,0,0,0,0,0]
    with engine_iscritto.connect().execution_options(isolation_level="READ COMMITTED") as conn:
        # The oldest date (roughly the gym's creation) is the registration
        # date of the most senior manager (ruolo = 1).
        query_data_piu_vecchia = text("SELECT data_iscrizione as data_creazione FROM persone WHERE ruolo = 1 ORDER BY data_iscrizione ASC limit 1")
        tab_data_vecchia = conn.execute(query_data_piu_vecchia)
        for row in tab_data_vecchia:
            data_piu_vecchia = row['data_creazione']
    # Both values are datetime.date; walk day by day up to (excluding) today.
    data_corrente = date.today()
    while data_piu_vecchia < data_corrente:
        gs = data_piu_vecchia.weekday()
        num_gs[gs] = num_gs[gs] + 1
        data_piu_vecchia = data_piu_vecchia + timedelta(days=1)
    return num_gs
def palestra_gia_creata():
    """Return True if the gym has already been set up (any time slot exists)."""
    # READ UNCOMMITTED is acceptable here because only a single manager
    # ever creates the gym, so there is no concurrent writer to race with.
    with engine_capo.connect().execution_options(isolation_level="READ UNCOMMITTED") as conn:
        s = text("SELECT COUNT(*) as num_fasce FROM fascia_oraria")
        tab = conn.execute(s)
        # COUNT(*) always yields exactly one row; decide on it immediately.
        for row in tab:
            if row['num_fasce'] != 0 :
                return True
            else:
                return False
def policy_presenti(data):
    """Return the occupancy percentage (int) of the policy covering `data`,
    False if the stored percentage is NULL, or implicitly None when no
    policy row matches.

    NOTE(review): the WHERE clause matches any policy whose data_fine is on
    or after `data`, regardless of data_inizio -- looks like it was meant to
    test data_inizio <= :input_data <= data_fine; confirm before changing.
    """
    s = text("SELECT percentuale_occupabilità FROM policy_occupazione WHERE data_inizio = :input_data OR :input_data < data_fine OR :input_data = data_fine")
    with engine_iscritto.connect().execution_options(isolation_level="SERIALIZABLE") as conn:
        res = conn.execute(s, input_data = data)
        # Only the first matching row is ever inspected.
        for row in res:
            val = row['percentuale_occupabilità']
            if val is None :
                return False
            else:
                return int(val)
| SpiderPorkPotter/Palestra | Palestra/views.py | views.py | py | 49,842 | python | it | code | 0 | github-code | 13 |
72201778579 | # Question Category : Arrays
# Difficulty : Medium
# Link to Leetcode Problem : https://leetcode.com/problems/product-of-array-except-self/
# NeetCode Video Solution : https://youtu.be/bNvIQI2wAjk
# Obs.: make two passes, first in-order, second in-reverse, to compute products
# Problem Description :
"""
Given an integer array nums, return an array answer such that answer[i] is equal to the product of all the elements of nums except nums[i].
The product of any prefix or suffix of nums is guaranteed to fit in a 32-bit integer.
You must write an algorithm that runs in O(n) time and without using the division operation.
Example 1:
Input: nums = [1,2,3,4]
Output: [24,12,8,6]
Example 2:
Input: nums = [-1,1,0,-3,3]
Output: [0,0,9,0,0]
"""
class MySolution(object):
    """Division-based variant (note: violates the problem's no-division rule)."""

    def productExceptSelf(self, nums: list[int]) -> list[int]:
        """Return a list whose entry i is the product of nums without nums[i]."""
        zeros = nums.count(0)
        if zeros > 1:
            # Two or more zeros force every product to zero.
            return [0] * len(nums)
        if zeros == 1:
            # Product of the non-zero entries belongs in the zero slot only.
            prod = 1
            for value in nums:
                if value != 0:
                    prod *= value
            return [prod if value == 0 else 0 for value in nums]
        # No zeros: divide the total product by each entry.
        prod = 1
        for value in nums:
            prod *= value
        return [int(prod / value) for value in nums]
class NeetCodeSolution(object):
    """Prefix/suffix-product solution -- O(n) time, no division."""

    # BUG FIX: the return annotation was `list(int)`, which is *evaluated*
    # at definition time and raises TypeError (int is not iterable), so the
    # module could not even be imported; `list[int]` is the correct type.
    def productExceptSelf(self, nums: list[int]) -> list[int]:
        """Return a list whose entry i is the product of nums without nums[i].

        First pass stores the product of everything before i; the reverse
        pass multiplies in the product of everything after i.
        """
        result = [1] * len(nums)
        CurrentPrefixProduct = 1
        for i in range(len(nums)):
            result[i] = CurrentPrefixProduct
            CurrentPrefixProduct *= nums[i]
        CurrentPostFixProduct = 1
        for i in range(len(nums) - 1, -1, -1):
            result[i] *= CurrentPostFixProduct
            CurrentPostFixProduct *= nums[i]
        return result
| MSoltanovUSP/LeetCode | Blind-75-LeetCode-Questions/Question04-Product_of_Array_Except_Self.py | Question04-Product_of_Array_Except_Self.py | py | 1,928 | python | en | code | 0 | github-code | 13 |
14739142286 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import urllib.request
import tensorflow as tf
from matplotlib import pyplot as plt
class DataManager:
    """Base class for dataset managers: holds the data arrays plus small
    filesystem/download helpers shared by all subclasses."""

    def __init__(self):
        # Empty placeholders until a subclass loads real data.
        self.X = np.zeros(0)
        self.Y = np.zeros(0)
        self.training_set_size = 0

    def init_dir(self, dir_path):
        """Ensures directory dir_path exists"""
        if os.path.isdir(dir_path):
            return
        os.makedirs(dir_path)

    def download_file(self, filename, url):
        """Download file from url if it isn't already on disk"""
        if not os.path.isfile(filename):
            urllib.request.urlretrieve(url, filename)

    def get_batch(self, N):
        # Placeholder: subclasses are expected to override this.
        print("Function 'get_batch' not implemented in child class!")

    def summary(self):
        """Print basic statistics about the loaded image array."""
        print("Found", self.X.shape[0], "images")
        print("size:", self.X.shape[1], "x", self.X.shape[2])
        print("type:", self.X.dtype)
class DspritesManager(DataManager):
    """DataManager for the dSprites dataset (64x64 binary shape images).

    With color=True each sample's foreground pixels receive one random RGB
    tint, giving (64, 64, 3) float32 samples; otherwise the raw
    single-channel arrays are used.
    """

    def __init__(self, batch_size=32, color=False):
        super(DspritesManager, self).__init__()
        self.batch_size = batch_size
        self.color = color
        self.data_shape = (64, 64)
        self.filepath = "data/dsprites"
        self.filename = "dsprites.npz"
        self.in_channels = 3 if color else 1
        # NOTE(review): with color=False the generator yields (64, 64)
        # samples while this declares (64, 64, 1) -- confirm tf.data accepts it.
        self.train_input_shape = tf.TensorShape([64, 64, self.in_channels])
        self.init_dsprites()

    def init_dsprites(self):
        """Download the data if needed, load it, and build the input pipeline."""
        self.init_dir(self.filepath)
        # Side effect: permanently changes the process working directory.
        os.chdir(self.filepath)
        self.download_dsprites()
        self.X = np.load(self.filename)["imgs"]
        self.training_set_size = self.X.shape[0]
        self.make_dataset()

    def download_dsprites(self):
        """Download dsprites to disk"""
        url = "https://github.com/deepmind/dsprites-dataset/raw/master"\
            "/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz"
        self.download_file(self.filename, url)

    def get_nth_sample(self, n):
        """
        Returns the nth sample in the shape of self.train_input_shape
        Useful for running a specific example through the network
        """
        x = self.X[n, :].astype(np.float32)
        return self.add_color_to_sample(x) if self.color else x

    def add_color_to_sample(self, x):
        """Tint the binary image's foreground with one random RGB colour.

        Returns x unchanged when colour mode is off.
        """
        if not self.color:
            return x
        # Cleanup: an unused full-size np.random.uniform draw was removed here.
        mask = x == 1
        r = x.copy()
        r[mask] = np.random.uniform()
        g = x.copy()
        g[mask] = np.random.uniform()
        b = x.copy()
        b[mask] = np.random.uniform()
        return np.stack((r, g, b), axis=-1)

    def get_batch(self, N):
        """Return the next batch of N samples, rebuilding the pipeline if N changed.

        BUG FIX: the old code called ``self.dataset.batch(...)`` on a dataset
        that was already batched, yielding nested batches of shape
        (N, old_batch_size, ...). Rebuilding from the raw generator applies
        the new batch size to individual samples instead.
        """
        if N != self.batch_size:
            self.batch_size = N
            self.make_dataset()
        return next(self.dataset_iterator)

    def show_nth_sample(self, n):
        """Plots the nth sample from the dataset"""
        x = self.get_nth_sample(n).squeeze()
        plt.imshow(x)
        plt.show()

    def show_random_sample(self):
        """Plots a single random sample from the dataset"""
        n = np.random.randint(self.training_set_size)
        self.show_nth_sample(n)

    def generate(self):
        """Yield random training samples forever (backs the tf.data pipeline)."""
        while True:
            n = np.random.randint(self.training_set_size)
            # Reuse get_nth_sample so colourization logic lives in one place.
            yield self.get_nth_sample(n)

    def make_dataset(self):
        """(Re)build the prefetching, batched tf.data pipeline and its iterator."""
        self.dataset = tf.data.Dataset.from_generator(self.generate, tf.float32, output_shapes=self.train_input_shape)
        self.dataset = self.dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
        self.dataset = self.dataset.batch(batch_size=self.batch_size)
        self.dataset_iterator = iter(self.dataset)
| alexbooth/Beta-VAE-Tensorflow-2.0 | dataset.py | dataset.py | py | 3,884 | python | en | code | 14 | github-code | 13 |
29572469405 | import numpy as np
import pandas as pd
import json
import transformers
import tensorflow as tf
from sklearn.metrics.pairwise import cosine_similarity
from transformers import logging
logging.set_verbosity_error()
import warnings
warnings.filterwarnings('ignore')
def init():
    """Load the AlephBERT tokenizer, the fine-tuned scoring model and the
    default sentence list into the module-level globals used by run()."""
    global model
    global tokenizer
    global default_list
    tokenizer = transformers.BertTokenizerFast.from_pretrained('onlplab/alephbert-base')
    # The base BERT is needed as a custom object when deserializing the
    # fine-tuned Keras model below.
    bert_base = transformers.TFBertModel.from_pretrained('onlplab/alephbert-base')
    # model = transformers.TFBertModel.from_pretrained('onlplab/alephbert-base')
    model = tf.keras.models.load_model('../model/alephbert_finetuned_model_v2',
                                       custom_objects={'TFBertModel': bert_base},
                                       compile=False) # need to change model path upon deployment
    df = pd.read_csv('../data/default_sentence_list_utf8.csv') # need to change default_list.cdv path upon deployment
    default_list = df['default sentence list']
def run(raw_data):
    """Score a user sentence against the default sentence list.

    raw_data: JSON string of the form {"data": [user_sentence, threshold]}.
    Returns [most_similar_sentence, similarity_type, max_score]; on any
    failure it returns [0, 0, 0] (best-effort endpoint contract).
    Depends on the module globals model/tokenizer/default_list set by init().
    """
    try:
        raw_data = json.loads(raw_data)['data']

        def reference_similarity(user_sentence, threshold, default_list):
            """Return (best default sentence, 1 if score >= threshold else 0, score)."""
            most_similar_sentence = ""
            similarity_type = 0
            # Tokenize the user sentence (slot 0) followed by every default sentence.
            # NOTE(review): return_tensors='pt' yields PyTorch tensors that are
            # then fed to tf.stack; 'tf' looks like the matching choice --
            # confirm before changing.
            tokens = {'input_ids': [], 'attention_mask': []}
            for idx in range(len(default_list) + 1):
                sentence = user_sentence if idx == 0 else default_list[idx - 1]
                new_tokens = tokenizer.encode_plus(sentence, max_length=128, truncation=True,
                                                   padding='max_length', return_tensors='pt')
                tokens['input_ids'].append(new_tokens['input_ids'][0])
                tokens['attention_mask'].append(new_tokens['attention_mask'][0])
            # Reformat the lists of tensors into single stacked tensors.
            tokens['input_ids'] = tf.stack(tokens['input_ids'])
            tokens['attention_mask'] = tf.stack(tokens['attention_mask'])
            outputs = model(tokens)
            embeddings = outputs.last_hidden_state
            # Mean-pool token embeddings, ignoring padding positions.
            attention_mask = tokens['attention_mask']
            mask = tf.expand_dims(attention_mask, -1)
            mask = tf.broadcast_to(mask, embeddings.shape)
            mask = np.asarray(mask).astype(np.float32)
            masked_embeddings = embeddings * mask
            summed = tf.math.reduce_sum(masked_embeddings, 1)
            summed_mask = tf.clip_by_value(mask.sum(1), clip_value_min=1e-9, clip_value_max=1000000)
            mean_pooled = summed / summed_mask
            # Convert to a numpy array for sklearn.
            mean_pooled = mean_pooled.numpy()
            # Cosine similarity of the user sentence (row 0) against all defaults.
            all_score = cosine_similarity(
                [mean_pooled[0]],
                mean_pooled[1:]
            )
            all_score = all_score[0]  # (1, n) -> 1D
            # Pick the best-scoring default sentence.
            max_val = max(all_score)
            item_idx = np.where(all_score == max_val)
            item_idx = item_idx[0][0]
            most_similar_sentence = default_list[item_idx]
            if max_val >= threshold:
                similarity_type = 1  # score cleared the caller-supplied threshold
            return (most_similar_sentence, similarity_type, max_val)

        most_similar_sentence, similarity_type, max_val = reference_similarity(raw_data[0], raw_data[1], default_list)
        all_vals = [most_similar_sentence, similarity_type, float(max_val)]
    except Exception:
        # BUG FIX: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; catching Exception keeps the best-effort
        # [0, 0, 0] fallback without doing that.
        all_vals = [0, 0, 0]
    return all_vals
###---------------------------------------------------------------------------------------------- | orion2107/Robohon_Training | azure_files/score_all.py | score_all.py | py | 5,977 | python | he | code | 0 | github-code | 13 |
21105245173 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 08:44:33 2021
@author: a8520
"""
class Node:
    """Singly linked list node: a payload plus a reference to its successor."""

    def __init__(self, val=None, next_=None):
        self.val = val      # payload stored at this node
        self.next_ = next_  # successor node, or None at the tail
# 如果linked list內包含環,如何找到環的入口節點。
class Solution:
    """Locate the entry node of a cycle in a singly linked list.

    Floyd-style approach in three phases:
      1. fast/slow pointers until they meet (cycle) or fast reaches None
         (no cycle -> return -1),
      2. walk once around the cycle from the meeting point to measure its
         length ``count``,
      3. restart two pointers at the head, give one a ``count``-node head
         start; advancing them in lockstep makes them meet at the entry.
    """

    def findStart(self, node):
        # Phase 0: both pointers take two initial steps; if the list ends
        # within two nodes there can be no cycle.
        ptr1 = node
        ptr2 = node
        step = 0
        while step < 2:
            if ptr1.next_ is not None:
                ptr1 = ptr1.next_
            else:
                return -1
            step += 1
            if ptr2.next_ is not None:
                ptr2 = ptr2.next_
            else:
                return -1
        # Phase 1: ptr1 advances every iteration, ptr2 every other one, so
        # ptr1 is the fast pointer; inside a cycle they must meet.
        circle = False
        count = 0
        while ptr1.next_ is not None:
            if count == 0:
                ptr1 = ptr1.next_
                count = 1
            else:
                ptr1 = ptr1.next_
                ptr2 = ptr2.next_
                count = 0
            if ptr1 is ptr2:
                circle = True
                break
        if not circle:
            return -1
        # Phase 2: measure the cycle length by walking ptr2 around once.
        count = 1
        ptr2 = ptr2.next_
        while ptr2 is not ptr1:
            ptr2 = ptr2.next_
            count += 1
        # Phase 3: restart both pointers at the head and give ptr1 a
        # ``count``-node head start. BUG FIX: the original loop never
        # incremented ``step`` and therefore spun forever.
        ptr1 = node
        ptr2 = node
        step = 0
        while step < count:
            ptr1 = ptr1.next_
            step += 1
        while ptr1 is not ptr2:
            ptr1 = ptr1.next_
            ptr2 = ptr2.next_
        return ptr1
node = Node() | uycuhnt2467/-offer | linkedlist中環的入口節點.py | linkedlist中環的入口節點.py | py | 1,831 | python | en | code | 0 | github-code | 13 |
32932333099 | import pandas as pd
import requests
import re
import numpy as np
from sklearn.mixture import GaussianMixture
from bs4 import BeautifulSoup as bs
import os
import warnings
warnings.filterwarnings("ignore")
pd.options.display.float_format = "{:.2f}".format
def apply_regex(regex, string):
    """Return the first match of `regex` in `string`, or np.nan when absent."""
    found = re.search(regex, string)
    return found.group() if found else np.nan
def scrape_to_df(url: str, multilevel=False):
    """Fetch `url` and return the table with id="data" as a DataFrame.

    When `multilevel` is True the redundant top header level is dropped.
    Raises RuntimeError on a non-2xx response. (BUG FIX: the old code only
    printed a message on failure and then crashed with NameError because
    `df` was never assigned.)
    """
    res = requests.get(url)
    if not res.ok:
        raise RuntimeError(f"request to {url} failed with status {res.status_code}")
    soup = bs(res.content, "html.parser")
    table = soup.find("table", {"id": "data"})
    df = pd.read_html(str(table))[0]
    if multilevel:
        df.columns = df.columns.droplevel(level=0)
    return df
def pull_adp(scoring: str = "STD"):
    """Scrape average-draft-position (ADP) data from fantasypros for one
    scoring system ('STD', 'PPR' or 'HALF')."""
    scoring_map = {
        "STD": "overall",
        "PPR": "ppr-overall",
        "HALF": "half-point-ppr-overall",
    }
    adp = scrape_to_df(f"https://www.fantasypros.com/nfl/adp/{scoring_map[scoring]}.php")
    combined = adp["Player Team (Bye)"]
    # Split the combined "Player Team (Bye)" column into its parts.
    adp["Player"] = combined.apply(
        lambda x: apply_regex(r"^([a-zA-Z'-.]+\s[a-zA-Z'-]+)(\s(IV|I{2,3}))?", x)
    )
    adp["Team"] = combined.apply(
        lambda x: apply_regex(r"(?!IV|I{1,3})([A-Z]{2,3})", x)
    )
    adp["Bye"] = combined.apply(lambda x: apply_regex(r"\d+", x))
    adp["Position"] = adp["POS"].apply(lambda x: apply_regex(r"\D+", x))
    adp.rename(columns={"AVG": "ADP"}, inplace=True)
    return adp[["Rank", "Player", "Team", "Bye", "Position", "ADP"]]
def projected_points(scoring: str = "STD", week: str = "draft"):
    """Scrape projected fantasy points per player from fantasypros.com.

    Args:
        scoring: 'STD', 'PPR', or 'HALF'.
        week: week identifier, 'draft' for season-long projections.

    Returns:
        DataFrame with Player/Position/Team/FPTS, sorted by FPTS descending.
    """
    frames = []
    for position in ["qb", "wr", "rb", "te"]:
        frame = scrape_to_df(
            f"https://www.fantasypros.com/nfl/projections/{position}.php?week={week}&scoring={scoring}",
            multilevel=True,
        )
        # Extract the team code before overwriting the Player column.
        frame["Team"] = frame["Player"].apply(
            lambda x: apply_regex(r"(?!IV|I{1,3})([A-Z]{2,3})", x)
        )
        frame["Player"] = frame["Player"].apply(
            lambda x: apply_regex(r"^([a-zA-Z'-.]+\s[a-zA-Z'-]+)(\s(IV|I{2,3}))?", x)
        )
        frame["Position"] = position.upper()
        frames.append(frame[["Player", "Position", "Team", "FPTS"]])
    combined = pd.concat(frames)
    return combined.sort_values("FPTS", ascending=False).reset_index(drop=True)
def tiering_players_all(scoring: str = "STD"):
    """Assign each player a positional tier by clustering projected points
    with a Gaussian mixture; returns a Player/Tier DataFrame."""

    def tiering_players_pos(scoring: str = "STD", pos: str = "RB"):
        # Number of tiers to fit per position.
        tier_num_mapping = {
            "QB": 8,
            "RB": 11,
            "WR": 12,
            "TE": 9,
        }
        pos_players = projected_points(scoring)
        pos_players = pos_players.loc[pos_players["Position"] == pos]
        gm = GaussianMixture(n_components=tier_num_mapping[pos], random_state=0)
        pos_players["gmm_labels"] = gm.fit_predict(pos_players[["FPTS"]])
        # Renumber raw cluster labels as tiers 1..k in order of first
        # appearance; rows are sorted by FPTS, so tier 1 is the best group.
        tier_map = {}
        for label in pos_players["gmm_labels"]:
            if label not in tier_map:
                tier_map[label] = len(tier_map) + 1
        pos_players["Tier"] = pos_players["gmm_labels"].map(tier_map)
        pos_players.drop("gmm_labels", axis=1, inplace=True)
        return pos_players.reset_index(drop=True)

    per_position = [
        tiering_players_pos(scoring=scoring, pos=position)
        for position in ["QB", "RB", "WR", "TE"]
    ]
    tiers = pd.concat(per_position)
    tiers.reset_index(drop=True, inplace=True)
    return tiers[["Player", "Tier"]]
# choose standard, ppr, or halfppr
def get_draft_board(scoring: str = "STD"):
    """Build a draft board combining value-over-replacement (VOR), ADP and
    positional tiers for the given scoring system ('STD', 'PPR', 'HALF')."""
    adp_df = pull_adp(scoring)
    projections = projected_points(scoring)

    # Replacement player per position = last player of that position taken
    # within the first `cutoff` ADP picks.
    cutoff = 95
    replacement_players = {"QB": "", "RB": "", "WR": "", "TE": ""}
    for _, row in adp_df[:cutoff].iterrows():
        if row["Position"] in replacement_players:
            replacement_players[row["Position"]] = row["Player"]

    replacement_values = {}
    for position, player_name in replacement_players.items():
        matched = projections.loc[projections.Player == player_name]
        replacement_values[position] = matched["FPTS"].tolist()[0]

    projections = projections.loc[projections.Position.isin(["QB", "RB", "WR", "TE"])]
    projections["VOR"] = projections.apply(
        lambda row: row["FPTS"] - replacement_values.get(row["Position"]), axis=1
    )
    projections["VOR Rank"] = projections["VOR"].rank(ascending=False).astype(int)
    projections = projections.sort_values("VOR", ascending=False)

    board = projections.merge(adp_df, how="left", on=["Player", "Position"])[:200].dropna()
    board["ADP Rank"] = board["ADP"].rank().astype(int)
    # Positive score: the market (ADP) drafts the player later than his
    # value rank suggests -- a potential sleeper.
    board["Sleeper Score"] = board["ADP Rank"] - board["VOR Rank"]
    board = board[["Player", "Position", "VOR Rank", "ADP Rank", "Sleeper Score"]]
    return pd.merge(board, tiering_players_all(scoring), how="left", on="Player")
# downloads standard, ppr, and halfppr draftboards to a folder
def pull_draft_boards(output_path: str = None):
    """Write one draftboard_<SYSTEM>.json per scoring system to output_path.

    BUG FIX: the default output_path of None used to crash inside
    os.path.join; it now falls back to the current working directory.
    (Dead commented-out Excel-export code was also removed.)
    """
    if output_path is None:
        output_path = os.getcwd()
    score_system_list = ["STD", "PPR", "HALF"]
    for score_system in score_system_list:
        df = get_draft_board(score_system)
        df.index += 1  # 1-based ranks in the exported JSON
        df.to_json(
            os.path.join(output_path, f"draftboard_{score_system}.json"),
            orient="records",
        )
if __name__ == "__main__":
pull_draft_boards(os.path.join(os.getcwd(), "root/backend/app/data/"))
| tbakely/thefantasybot | draft_boards.py | draft_boards.py | py | 6,455 | python | en | code | 0 | github-code | 13 |
3229665330 | import re
import random
import os
from datetime import datetime
def szereguj_instancje(nazwa_instancji, liczba_wezlow):
    """Simulate JSQ (join-shortest-queue) scheduling of one instance file.

    Jobs are read from ../instancje/<nazwa_instancji> as "ready_time size"
    lines and dispatched to the node with the fewest running jobs; each
    node shares its processor among its running jobs, so a job's finish
    time scales with the queue length. Mean delay / processing / response
    times and the per-node job assignment are written to ../uszeregowanie/.
    """
    czas = datetime.now()
    print("\n[%s] Szeregowanie instancji %s, liczba_wezlow=%d" % (czas, nazwa_instancji, liczba_wezlow))
    random.seed(0)
    # Read the jobs: each line is "ready_time size".
    zadania = []
    nazwa_pliku_wejsciowego = "../instancje/" + nazwa_instancji
    f = open(file=nazwa_pliku_wejsciowego, mode="r")
    for line in f:
        zadania.append(line.split(' '))
    f.close()
    nazwa_pliku_wyjsciowego = "../uszeregowanie/szer-jsq-n" + "%03d-" % liczba_wezlow + nazwa_instancji
    nazwa_pliku_wyjsciowego_nodes = "../uszeregowanie/szer-jsq-n" + "%03d-nodes-" % liczba_wezlow + nazwa_instancji
    # Per-node simulation state.
    czas_wezlow = [0] * liczba_wezlow              # current simulated time per node
    wykonana_praca_wezlow = [0] * liczba_wezlow    # work already charged against each node's head job
    zadania_wezlow = [[] for _ in range(liczba_wezlow)]             # indices of jobs assigned to each node
    wykonywane_zadania_wezlow = [[] for _ in range(liczba_wezlow)]  # running jobs per node: [ready_time, remaining_time]
    czas_opoznienia = []     # per-job delay
    czas_przetwarzania = []  # per-job processing time
    czas_odpowiedzi = []     # per-job response time
    for indeks_zadania in range(len(zadania)):
        zadanie = zadania[indeks_zadania]
        moment_gotowosci = int(zadanie[0])
        rozmiar_zadania = int(zadanie[1])
        # Update every node's state up to the completion of the last job
        # finishing before the current job's ready time.
        for indeks_wezla in range(liczba_wezlow):
            wykonywane_zadania_wezla = wykonywane_zadania_wezlow[indeks_wezla]
            liczba_wykonywanych_zadan = len(wykonywane_zadania_wezla)
            while liczba_wykonywanych_zadan > 0:
                czas_ukonczenia = czas_wezlow[indeks_wezla] + (wykonywane_zadania_wezla[0][1] - wykonana_praca_wezlow[indeks_wezla]) * liczba_wykonywanych_zadan
                if czas_ukonczenia > moment_gotowosci:
                    break
                czas_opoznienia.append(0)
                czas_przetwarzania.append(czas_ukonczenia - wykonywane_zadania_wezla[0][0])
                czas_odpowiedzi.append(czas_przetwarzania[-1])
                wykonana_praca_wezlow[indeks_wezla] += (czas_ukonczenia - czas_wezlow[indeks_wezla]) / liczba_wykonywanych_zadan
                wykonywane_zadania_wezla.pop(0)
                liczba_wykonywanych_zadan -= 1
                czas_wezlow[indeks_wezla] = czas_ukonczenia
        # Pick the node with the shortest queue of running jobs (JSQ);
        # stop early once an idle node (queue length 0) is found.
        indeks_wybranego_wezla = 0
        najmniejsza_liczba_zadan_w_kolejce = len(wykonywane_zadania_wezlow[0])
        indeks_wezla = 1
        while (indeks_wezla < liczba_wezlow) and (najmniejsza_liczba_zadan_w_kolejce > 0):
            if najmniejsza_liczba_zadan_w_kolejce > len(wykonywane_zadania_wezlow[indeks_wezla]):
                najmniejsza_liczba_zadan_w_kolejce = len(wykonywane_zadania_wezlow[indeks_wezla])
                indeks_wybranego_wezla = indeks_wezla
            indeks_wezla += 1
        # Advance the chosen node's state to the current job's ready time,
        # charging the elapsed shared-processor work to its running jobs.
        i = 0
        wykonywane_zadania_wezla = wykonywane_zadania_wezlow[indeks_wybranego_wezla]
        liczba_wykonywanych_zadan = len(wykonywane_zadania_wezla)
        uplyniety_czas = moment_gotowosci - czas_wezlow[indeks_wybranego_wezla]
        if liczba_wykonywanych_zadan > 0:
            wykonana_praca_wezlow[indeks_wybranego_wezla] += uplyniety_czas / liczba_wykonywanych_zadan
        while i < liczba_wykonywanych_zadan:
            wykonywane_zadania_wezla[i][1] -= wykonana_praca_wezlow[indeks_wybranego_wezla]
            i += 1
        czas_wezlow[indeks_wybranego_wezla] = moment_gotowosci
        wykonana_praca_wezlow[indeks_wybranego_wezla] = 0
        # Insert the current job into the node's running list, keeping it
        # sorted by remaining processing time.
        i = 0
        czas_trwania_nowego_zadania = rozmiar_zadania * liczba_wezlow
        while i < liczba_wykonywanych_zadan:
            if czas_trwania_nowego_zadania < wykonywane_zadania_wezla[i][1]:
                break
            i += 1
        wykonywane_zadania_wezla.insert(i, [moment_gotowosci, czas_trwania_nowego_zadania])
        zadania_wezlow[indeks_wybranego_wezla].append(indeks_zadania)
    # After all jobs are dispatched, drain the jobs still queued on each node.
    for indeks_wezla in range(liczba_wezlow):
        wykonywane_zadania_wezla = wykonywane_zadania_wezlow[indeks_wezla]
        liczba_wykonywanych_zadan = len(wykonywane_zadania_wezla)
        while liczba_wykonywanych_zadan > 0:
            czas_ukonczenia = czas_wezlow[indeks_wezla] + (wykonywane_zadania_wezla[0][1] - wykonana_praca_wezlow[indeks_wezla]) * liczba_wykonywanych_zadan
            czas_opoznienia.append(0)
            czas_przetwarzania.append(czas_ukonczenia - wykonywane_zadania_wezla[0][0])
            czas_odpowiedzi.append(czas_przetwarzania[-1])
            wykonana_praca_wezlow[indeks_wezla] += (czas_ukonczenia - czas_wezlow[indeks_wezla]) / liczba_wykonywanych_zadan
            wykonywane_zadania_wezla.pop(0)
            liczba_wykonywanych_zadan -= 1
            czas_wezlow[indeks_wezla] = czas_ukonczenia
    # Aggregate metrics and persist them.
    sredni_czas_opoznienia = sum(czas_opoznienia) / len(czas_opoznienia)
    sredni_czas_przetwarzania = sum(czas_przetwarzania) / len(czas_przetwarzania)
    sredni_czas_odpowiedzi = sum(czas_odpowiedzi) / len(czas_odpowiedzi)
    czas = datetime.now()
    print("[%s] sredni_czas_opoznienia=%f\tsredni_czas_przetwarzania=%f\tsredni_czas_odpowiedzi=%f" % (czas, sredni_czas_opoznienia, sredni_czas_przetwarzania, sredni_czas_odpowiedzi))
    f = open(file=nazwa_pliku_wyjsciowego, mode="w")
    f.write("%.5f %.5f %.5f" % (sredni_czas_opoznienia, sredni_czas_przetwarzania, sredni_czas_odpowiedzi))
    f.close()
    # The "nodes" file additionally lists, per node, its job count and indices.
    f = open(file=nazwa_pliku_wyjsciowego_nodes, mode="w")
    f.write("%.5f %.5f %.5f" % (sredni_czas_opoznienia, sredni_czas_przetwarzania, sredni_czas_odpowiedzi))
    for zadania_wezla in zadania_wezlow:
        f.write("\n%d" % len(zadania_wezla))
        for zadanie_wezla in zadania_wezla:
            f.write(" %d" % zadanie_wezla)
    f.close()
def main():
    """Run the JSQ scheduler over every instance file for each node count."""
    node_counts = [1, 2, 5, 10, 20, 50, 100]
    pattern = re.compile('^inst.*\\.txt$')
    instance_names = [name for name in os.listdir("../instancje") if pattern.match(name)]
    for instance_name in instance_names:
        for node_count in node_counts:
            szereguj_instancje(instance_name, node_count)
if __name__ == "__main__":
main()
| tomdziwood/pbd-projekt | programy_szeregujace/szer_jsq_td.py | szer_jsq_td.py | py | 6,458 | python | pl | code | 0 | github-code | 13 |
1644184521 | from ftplib import FTP
from ftplib import FTP_TLS
# SECURITY: hard-coded FTP host, user, password and a local file path
# committed to source control. Move these to environment variables or a
# secrets store and rotate the exposed password.
x = "www.feg-hochdorf.ch"
y = "fegch_4"
z = "N1Us3kU97x"
u = '/Users/silva/Desktop/Predigtuploader/LOGO_Petrol_weiss.mp4'
class DataToTypo:
    """Uploads files to the Typo3 site over FTP with TLS."""

    @staticmethod
    def FTP(server_address, ftp_user, ftp_pw, filePath, ftpDirectory="Predigten"):
        """Upload filePath into ftpDirectory on server_address via FTPS.

        BUG FIX: the ftpDirectory parameter was silently ignored in favour
        of a hard-coded 'Predigten'; the default preserves old behaviour.
        (@staticmethod also makes the missing-self signature explicit;
        class-level calls such as DataToTypo.FTP(...) keep working.)
        """
        ftp = FTP_TLS(server_address)
        ftp.login(user=ftp_user, passwd=ftp_pw)
        # Switch the data channel to TLS as well.
        ftp.prot_p()
        # Debug listing of the remote root. Note retrlines prints each line
        # and returns the final status string, not a list.
        ListOfDirectories = ftp.retrlines('NLST')
        print(type(ListOfDirectories))
        print(ListOfDirectories)
        # Change into the requested remote upload directory.
        ftp.cwd(ftpDirectory)
        # NOTE(review): '/'-splitting assumes POSIX-style paths;
        # os.path.basename would also handle Windows separators.
        fileName = filePath.split("/")[-1]
        print(fileName)
        # Upload the file in binary mode.
        with open(filePath, 'rb') as local_file:
            ftp.storbinary(f'STOR {fileName}', local_file)
        ftp.quit()
# Module-level example upload using the hard-coded credentials above;
# runs on import.
DataToTypo.FTP(x, y, z, u)
| VonDoehner/DataToTypo | DataToTypo.py | DataToTypo.py | py | 1,111 | python | en | code | 0 | github-code | 13 |
43231781084 | import pyautogui
from math import *
import numpy as np
import matplotlib
import matplotlib.colors
import matplotlib.pyplot as plt
from matplotlib import patches
import matplotlib.image as mpimg
from matplotlib.animation import FuncAnimation, writers
# ===============================================================================
# ===============================================================================
def getFuncList(angles, amplitudeList, freqList, phaseList):
    """Return one offset sinusoid per (amplitude, frequency, phase) triple.

    Each component is a * sin(f * angles + p) + 1.
    BUG FIX: the old loop ranged over min(len(amplitudeList), len(freqList))
    but still indexed phaseList, raising IndexError whenever phaseList was
    the shortest list; zip truncates to the shortest of all three.
    """
    funcList = []
    for amp, freq, phase in zip(amplitudeList, freqList, phaseList):
        funcList.append(amp * np.sin(freq * angles + phase) + 1)
    return funcList
def wrap(angles, freqList, wf):
    """Wrap the summed signal around the origin at winding frequency wf.

    Returns (x, y) arrays of the polar-wrapped curve; for a single angle a
    scalar pair is returned instead. Relies on the module-level
    amplitudeList and phaseList globals while freqList is passed explicitly
    (NOTE(review): inconsistent, but kept for caller compatibility).
    Generalization/BUG FIX: the scalar check ``type(angles) is float``
    missed ints and numpy scalars; np.ndim covers them all.
    """
    if np.ndim(angles) == 0:
        angles = np.array([float(angles)])
    funcs = getFuncList(angles, amplitudeList, freqList, phaseList)
    func = np.zeros(len(angles))
    for component in funcs:
        func += component
    # Polar wrapping: radius = summed signal, angle = wf * theta.
    x = np.cos(wf * angles)
    y = np.sin(wf * angles)
    fx = func * x
    fy = func * y
    if len(angles) == 1:
        return fx[0], fy[0]
    return fx, fy
# ===============================================================================
fig = plt.figure(figsize=(12, 7))
axes_mainGrapth = fig.add_subplot(211)
axes_mainGrapthCircle = fig.add_subplot(212)
axes_mainGrapth.axis('off')
axes_mainGrapthCircle.axis('off')
axes_mainGrapthCircle.axis('equal')
# ===============================================================================
minA = 0
maxA = 2 * pi
dA = 0.01
ff = 1
wf = 1
plotsNum = 3
angles = np.arange(minA, maxA, dA)
amplitudeList = np.array([1,1,1])
freqList = np.array([2,5,9])*1
phaseList = np.array([0,0,0])
# amplitudeList = [i for i in range(1, plotsNum + 1)]
# freqList = [i for i in range(1, plotsNum + 1)]
# phaseList = [0 for i in range(1, plotsNum + 1)]
# print(amplitudeList)
# print(freqList)
# print(phaseList)
plots = getFuncList(angles, amplitudeList, freqList, phaseList)
mainPlot = np.zeros(len(angles))
for i in plots:
mainPlot += i
# ===============================================================================
offset1 = 0.5
mnx = angles.min() - offset1
mxx = angles.max() + offset1
mny = mainPlot.min() - offset1
mxy = mainPlot.max() + offset1
axes_mainGrapth.set_xlim(mnx, mxx)
axes_mainGrapth.set_ylim(mny, mxy)
# ===============================================================================
line_mainPlot, = axes_mainGrapth.plot(angles, mainPlot, lw=1.5, ls='-', c='blue', zorder=0)
axes_mainGrapth.hlines(y=mainPlot.min(), xmin=0, xmax=angles.max(), colors=['darkgrey'])
line_arrow_main, = axes_mainGrapth.plot([], [], lw=2, ls='-', c='red', zorder=1)
scatter_arrowPoint = axes_mainGrapth.scatter([], [], s=15 * pi, color='red', zorder=2)
axes_mainGrapthCircle.hlines(y=0, xmin=-mxy, xmax=mxy, colors=['darkgrey'], lw=1)
axes_mainGrapthCircle.vlines(x=0, ymin=-mxy, ymax=mxy, colors=['darkgrey'], lw=1)
wrapX, wrapY = wrap(angles, freqList, wf)
line_wrapGraph, = axes_mainGrapthCircle.plot(wrapX, wrapY, lw=1.5, ls='-', c='blue', zorder=1)
line_arrow_circle, = axes_mainGrapthCircle.plot([], [], lw=2, ls='-', c='red', zorder=2)
scatter_arrowPoint_circle = axes_mainGrapthCircle.scatter([], [], s=15 * pi, color='red', zorder=3)
# ===============================================================================
w = 0.005
def anim(t):
    """FuncAnimation callback: move the arrow markers on both plots to the
    phase corresponding to frame t (uses the module-level plot artists)."""
    # Map the frame index to an angle in [0, maxA) at angular speed w.
    dt = abs(((w * t) % 1) * maxA)
    # Evaluate the summed signal at the single angle dt.
    funcs = getFuncList(np.array([dt]), amplitudeList, freqList, phaseList)
    y = np.zeros((1))
    for i in funcs:
        y += i
    # Vertical arrow on the linear plot, from the baseline up to the curve.
    arrowX_main = [dt, dt]
    arrowY_main = [mainPlot.min(), y]
    line_arrow_main.set_data(arrowX_main, arrowY_main)
    scatter_arrowPoint.set_offsets([dt, y])
    # Radial arrow on the wrapped (polar) plot, from the origin outwards.
    arrowX_circle, arrowY_circle = wrap(dt, freqList, wf)
    line_arrow_circle.set_data([0, arrowX_circle], [0, arrowY_circle])
    scatter_arrowPoint_circle.set_offsets([arrowX_circle, arrowY_circle])
    # Return the artists changed this frame (required for blitting).
    return scatter_arrowPoint, line_arrow_main, line_arrow_circle, scatter_arrowPoint_circle,
# ===============================================================================
animation1 = FuncAnimation(fig=fig, func=anim, interval=1, blit=True,frames=int(2*pi/w))
# ===============================================================================
# ===============================================================================
path = r"F:\From Disk Windows 7 SSD\Home Work\Different\Essence of Fourier Transform\Gifs\FAddArrowAnim(1).gif"
matplotlib.rcParams['animation.ffmpeg_path'] = r"C:\ffmpeg (доп. ПО длясохранения видео) - ярлык\ffmpeg-4.4-full_build\bin\ffmpeg.exe"
writervideo = matplotlib.animation.FFMpegWriter(fps=30)
#animation1.save(path,writervideo)
# ===============================================================================
plt.interactive(False)
plt.tight_layout()
# PlotManager = plt.get_current_fig_manager()
# PlotManager.window.state('zoomed')
plt.show()
plt.close()
| VY354/my_repository | Python/projects/visualizations/fourier_transform_visualization/FuncAddingGraph(Arrow).py | FuncAddingGraph(Arrow).py | py | 5,060 | python | en | code | 0 | github-code | 13 |
27226412809 | from Persona import Persona
from Salario import Salario
class Empleado(Persona, Salario):
    """Employee combining personal data (Persona) and salary behaviour (Salario)."""

    def datosEmpleado(self, salario, cargo):
        # Print the employee-specific fields (user-facing text is Spanish).
        print(f'El salario es {salario} ')
        print(f'El cargo es {cargo}')
objEmpleado = Empleado('Juan', 20, 'Masculino')
objEmpleado.datosPersonales()
objEmpleado.datosEmpleado(2000000, 'Dev')
objEmpleado.valorSalarioMes(50)
| andresdino/usco2023 | Prog2/POO/Herencia/Empleado.py | Empleado.py | py | 374 | python | es | code | 1 | github-code | 13 |
31150708632 | # Temperature Calculator by L. Carthy
import time
def intro_options():
    """Prompt for a conversion choice and dispatch to the matching converter.

    BUG FIX: a non-numeric menu entry used to crash with an uncaught
    ValueError (the converter functions all guard against this); it is now
    treated like any other invalid option and re-prompts.
    """
    try:
        option = int(input("1 for Fahrenheit to Celsius \n"
                           "2 for Celcius to Fahrenheit \n"
                           "3 for Fahrenheit to Kelvin: "))
    except ValueError:
        option = None
    if option == 1:
        ftoc_calc()
    elif option == 2:
        ctof_calc()
    elif option == 3:
        ftk_calc()
    else:
        print("That is not an option. Try again.")
        time.sleep(2)
        intro_options()
def ftoc_calc():
    """Prompt for a Fahrenheit value and print its Celsius equivalent.

    Non-integer input prints an error, pauses two seconds, and re-prompts.
    """
    try:
        fahrenheit = int(input("Enter the Fahrenheit value: "))
    except ValueError:
        print("Error: Your input is not a number. Try again.")
        time.sleep(2)
        ftoc_calc()
    else:
        celsius = (fahrenheit - 32) * 5/9
        print("Your answer in Celsius is: ", celsius)
def ctof_calc():
    """Prompt for a Celsius value and print its Fahrenheit equivalent.

    Non-integer input prints an error, pauses two seconds, and re-prompts
    this same converter.
    """
    try:
        ctf_input = int(input("Enter the Celsius value: "))
        ctf_conversion = ctf_input * 9/5 + 32
        print("Your answer in Fahrenheit is: ", ctf_conversion)
    except ValueError:
        print("Error: Your input is not a number. Try again.")
        time.sleep(2)
        # BUGFIX: retry *this* converter; the original mistakenly restarted
        # the Fahrenheit-to-Celsius prompt (ftoc_calc) here.
        ctof_calc()
def ftk_calc():
    """Prompt for a Fahrenheit value and print its Kelvin equivalent.

    Non-integer input prints an error, pauses two seconds, and re-prompts
    this same converter.
    """
    try:
        ftk_input = int(input("Enter the Fahrenheit value: "))
        ftk_conversion = (ftk_input + 459.67) * 5/9
        print("Your answer in Kelvin is: ", ftk_conversion)
    except ValueError:
        print("Error: Your input is not a number. Try again.")
        time.sleep(2)
        # BUGFIX: retry *this* converter; the original mistakenly restarted
        # the Fahrenheit-to-Celsius prompt (ftoc_calc) here.
        ftk_calc()
intro_options()
| thisislola/Tutorials | temp_calculator.py | temp_calculator.py | py | 1,828 | python | en | code | 0 | github-code | 13 |
33919194571 | import base64
import hashlib
import hmac
import json
import requests
import time
import urllib.parse
def task_reminder(webhook, secret=None, **kwargs):
    """Post a markdown "task done" notification to a DingTalk robot webhook.

    :param webhook: DingTalk robot webhook URL.
    :param secret: optional signing secret; when given, the request is signed
        with HMAC-SHA256 over "<timestamp>\\n<secret>" and the ``timestamp``
        and ``sign`` query parameters are appended, per the DingTalk protocol.
    :param kwargs: optional ``title``, ``text`` (markdown body) and ``mobiles``
        (list of phone numbers to @-mention).
    :return: the ``requests.Response`` of the POST.
    """
    timestamp = str(round(time.time() * 1000))
    target = webhook
    if secret is not None:
        secret_enc = secret.encode('utf-8')
        string_to_sign = '{}\n{}'.format(timestamp, secret)
        string_to_sign_enc = string_to_sign.encode('utf-8')
        hmac_code = hmac.new(secret_enc, string_to_sign_enc, digestmod=hashlib.sha256).digest()
        sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
        # BUGFIX: the original appended '&times' + 'tamp=' (mojibake where the
        # literal '&timestamp' was HTML-decoded into the multiplication sign);
        # DingTalk requires the literal '&timestamp=' query parameter.
        target = webhook + '&timestamp=' + timestamp + '&sign=' + sign
    original_text = "#### 完成情况 \n> 已完成\n"
    headers = {'Content-Type': 'application/json'}
    data = {
        "msgtype": "markdown",
        "markdown": {
            "title": kwargs.get("title", "任务"),
            "text": kwargs.get("text", original_text)
        },
        "at": {
            "atMobiles": kwargs.get("mobiles", None),
            "isAtAll": False
        }
    }
    # One POST path for both the signed and unsigned cases (the original
    # duplicated the call in an if/else).
    res = requests.post(target, data=json.dumps(data), headers=headers)
    return res
| ChiahsinChu/dpana | dpana/message.py | message.py | py | 1,279 | python | en | code | null | github-code | 13 |
22031476823 | #
# SPDX-License-Identifier: Apache-2.0
#
from rest_framework import serializers
from api.common.enums import NetworkType, ConsensusPlugin, Operation
from api.common.serializers import PageQuerySerializer
# Shared validation bounds / help text for the cluster serializers below.
NAME_MIN_LEN = 4
NAME_MAX_LEN = 36
NAME_HELP_TEXT = "Name of Cluster"
SIZE_MAX_VALUE = 6
SIZE_MIN_VALUE = 2
class ClusterQuery(PageQuerySerializer):
    """Query-string parameters for listing/filtering clusters.

    All filters are optional; pagination fields come from PageQuerySerializer.
    """
    consensus_plugin = serializers.ChoiceField(
        required=False,
        allow_null=True,
        help_text=ConsensusPlugin.get_info("Consensus Plugin:", list_str=True),
        choices=ConsensusPlugin.to_choices(True),
    )
    name = serializers.CharField(
        required=False,
        allow_null=True,
        min_length=NAME_MIN_LEN,
        max_length=NAME_MAX_LEN,
        help_text=NAME_HELP_TEXT,
    )
    host_id = serializers.CharField(
        help_text="Host ID", required=False, allow_null=True
    )
    network_type = serializers.ChoiceField(
        required=False,
        allow_null=True,
        help_text=NetworkType.get_info("Network Types:", list_str=True),
        choices=NetworkType.to_choices(),
    )
    size = serializers.IntegerField(
        required=False,
        allow_null=True,
        min_value=SIZE_MIN_VALUE,
        max_value=SIZE_MAX_VALUE,
        help_text="Size of cluster",
    )
class ClusterIDSerializer(serializers.Serializer):
    """Body/response carrying just a cluster's ID."""
    id = serializers.CharField(help_text="ID of cluster")
class ClusterCreateBody(serializers.Serializer):
    """Request body for creating a cluster (all fields required)."""
    name = serializers.CharField(
        min_length=NAME_MIN_LEN,
        max_length=NAME_MAX_LEN,
        help_text=NAME_HELP_TEXT,
    )
    host_id = serializers.CharField(help_text="Host ID")
    network_type = serializers.ChoiceField(
        help_text=NetworkType.get_info("Network Types:", list_str=True),
        choices=NetworkType.to_choices(),
    )
    size = serializers.IntegerField(
        min_value=SIZE_MIN_VALUE,
        max_value=SIZE_MAX_VALUE,
        help_text="Size of cluster",
    )
    consensus_plugin = serializers.ChoiceField(
        help_text=ConsensusPlugin.get_info("Consensus Plugin:", list_str=True),
        choices=ConsensusPlugin.to_choices(True),
    )
class ClusterResponse(serializers.Serializer):
    """Minimal cluster representation returned to clients (name only)."""
    name = serializers.CharField()
class ClusterOperationSerializer(serializers.Serializer):
    """Request body selecting an operation (start/stop/...) to run on a cluster."""
    action = serializers.ChoiceField(
        help_text=Operation.get_info("Operation for cluster:", list_str=True),
        choices=Operation.to_choices(True),
    )
| hyperledger/cello | src/api-engine/api/routes/cluster/serializers.py | serializers.py | py | 2,462 | python | en | code | 862 | github-code | 13 |
# 🚨 Don't change the code below 👇
age = input("What is your current age?")
# 🚨 Don't change the code above 👆

# Assuming a 90-year lifespan, report the remaining time in three units.
years_left = 90 - int(age)
days_left = years_left * 365
weeks_left = years_left * 52
months_left = years_left * 12
print(f"You have {days_left} days, {weeks_left} weeks, and {months_left} months left.")
| Daven-anony/90yearsjuslikedat | main.py | main.py | py | 351 | python | en | code | 0 | github-code | 13 |
23060627785 | #!/usr/bin/python3
#
#Funkcja wait_for_key() oczekująca na naciśniecie dowolnego przycisku.
#Test 1: Jakiś program z funkcją print
#Test 2: Program obracający tarczę o 12 kroków w prawo po każdym naciśnięciu dowolnego przycisku.
from board_driver_simulator import open, close, but, pot, det, led # Simulator
import time
from my_board import step_prawo, get_key, licznik, set_point, get_detector
x = licznik
def wait_for_key():
    """Return True when any board button is currently pressed (but() != 0).

    The name is kept for compatibility; note it polls once rather than
    blocking until a press.
    """
    # Idiom fix: return the comparison directly instead of an
    # if/return True/else/return False ladder.
    return but() != 0
# Main polling loop.  NOTE: open()/close() here are the board-driver
# functions imported above (they shadow the builtins of the same name).
try:
    open()
    while(True):
        # Test 1 (disabled): print a message while any button is pressed.
        #if(wait_for_key()):
        #    print("Naciskasz teraz przycisk") #test1
        # Test 2: on a button press, advance the dial 12 steps to the right,
        # updating the set-point after every step.
        if(wait_for_key()):
            i = 0
            while(i < 12): #test2
                print("weszlo")
                x = step_prawo(x)
                set_point(x)
                i=i + 1
finally:
    close()
| wierzba100/Wprowadzenie-do-programowania-python | Czesc 3/cz3_02.py | cz3_02.py | py | 896 | python | pl | code | 0 | github-code | 13 |
15202605162 | from hashlib import md5
from pathlib import Path
from invoke import task
def as_user(ctx, user, cmd, *args, **kwargs):
    """Run *cmd* on the remote host as *user* through a sudo login shell.

    Extra positional/keyword arguments are forwarded to ``ctx.run``.
    """
    wrapped = 'sudo --set-home --preserve-env --user {} --login {}'.format(user, cmd)
    ctx.run(wrapped, *args, **kwargs)
def as_bench(ctx, cmd, *args, **kwargs):
    """Run *cmd* as the ``bench`` service user.

    BUGFIX: forward *args/**kwargs to as_user (and ultimately ctx.run);
    the original signature accepted them but silently dropped them.
    """
    as_user(ctx, 'bench', cmd, *args, **kwargs)
def sudo_put(ctx, local, remote, chown=None):
    """Upload *local* to a privileged *remote* path by staging through /tmp.

    The staging name is the md5 of the remote path, so repeated uploads of
    the same target reuse one temp file.  If *chown* is given ("user:group"),
    ownership of the final file is changed accordingly.
    """
    staging = str(Path('/tmp') / md5(remote.encode()).hexdigest())
    ctx.put(local, staging)
    ctx.run(f'sudo mv {staging} {remote}')
    if chown:
        ctx.run(f'sudo chown {chown} {remote}')
def put_dir(ctx, local, remote):
    """Recursively upload *local* under *remote*, owned by the bench user.

    Paths containing any exclusion pattern (hidden entries, caches, egg
    metadata, tests) are skipped.  Directories are created via as_bench,
    files are staged with sudo_put.
    """
    skip_patterns = ('/.', '__pycache__', '.egg-info', '/tests')
    local = Path(local)
    remote = Path(remote)
    for entry in local.rglob('*'):
        if any(pattern in str(entry) for pattern in skip_patterns):
            continue
        target = remote / entry.relative_to(local)
        if entry.is_dir():
            as_bench(ctx, 'mkdir -p {}'.format(target))
        else:
            sudo_put(ctx, entry, str(target), chown='bench:users')
@task
def bench(ctx, endpoint='minimal', frameworks='roll', workers='1'):
    """Run the benchmark suite on the remote host as the bench user."""
    as_bench(ctx, '/bin/bash -c ". /srv/bench/venv/bin/activate && '
             'cd /srv/bench/src/benchmarks && ./bench.sh '
             f'--endpoint \"{endpoint}\" --frameworks \"{frameworks}\" '
             f'--workers \"{workers}\""')
@task
def system(ctx):
    """Install OS packages and create the 'bench' system user (idempotent)."""
    ctx.run('sudo apt update')
    ctx.run('sudo apt install python3.6 python3.6-dev wrk '
            'python-virtualenv build-essential httpie --yes')
    # '|| exit 0' keeps the task idempotent when the user already exists.
    ctx.run('sudo useradd -N bench -m -d /srv/bench/ || exit 0')
    ctx.run('sudo chsh -s /bin/bash bench')
@task
def venv(ctx):
    """Create the Python 3.6 virtualenv for the bench user and upgrade pip."""
    as_bench(ctx, 'virtualenv /srv/bench/venv --python=python3.6')
    as_bench(ctx, '/srv/bench/venv/bin/pip install pip -U')
@task
def bootstrap(ctx):
    """Full provisioning: system packages, then virtualenv, then code deploy."""
    system(ctx)
    venv(ctx)
    deploy(ctx)
@task
def deploy(ctx):
    """Push the local working copy to /srv/bench/src and (re)install it."""
    as_bench(ctx, 'rm -rf /srv/bench/src')
    # Push local code so we can benchmark local changes easily.
    put_dir(ctx, Path(__file__).parent.parent, '/srv/bench/src')
    as_bench(ctx, '/srv/bench/venv/bin/pip install -r '
             '/srv/bench/src/benchmarks/requirements.txt')
    as_bench(ctx, '/bin/bash -c "cd /srv/bench/src/; '
             '/srv/bench/venv/bin/python setup.py develop"')
| pyrates/roll | benchmarks/fabfile.py | fabfile.py | py | 2,375 | python | en | code | 27 | github-code | 13 |
7112677644 | # -*- coding: utf-8 -*-
import csv
import random
def run(outname):
    """Interactively build a random Naive-Bayes "base model" and write it out.

    Prompts for the number of classes, attributes, and values per attribute,
    fills the class/conditional probability tables with random integers, and
    writes '<outname><total+1>_basemodel.csv' plus '<outname><total+1>_info.csv'.
    """
    print("Input the number of classes: ")
    num_class = int(input())
    # Draw one class weight per class in [50, 100); anything below 50 is
    # redrawn (the loop only advances on an accepted draw).
    class_prob = []
    count = 0
    while count < num_class:
        num = random.randrange(100)
        if num >= 50:
            class_prob.append(num)
            count += 1
    print("Input the number of attributes: ")
    num_attr = int(input())
    # cond_prob[j] collects, for class j, one value-weight list per attribute.
    cond_prob = []
    for i in range(num_class):
        cond_prob.append([])
    total = 0
    for i in range(num_attr):
        print("Current total: {}".format(total))
        print("Input the number of values for attribute {}".format(i))
        num_value = int(input())
        total += num_value
        for j in range(num_class):
            temp = []
            for k in range(num_value):
                num = random.randrange(100)
                # Skew the distribution: mid-range draws (31..49) are pushed
                # up into [50, 100); draws <= 30 are zeroed out.
                if 30 < num < 50:
                    num = random.randrange(50, 100)
                elif num <= 30:
                    num = 0
                temp.append(num)
            cond_prob[j].append(temp)
    print(class_prob)
    print(cond_prob)
    # Base-model CSV: class weights first, then per class a blank row
    # followed by that class's per-attribute value weights.
    filename = outname + str(total+1) + '_basemodel.csv'
    with open(filename, 'w') as csvfile:
        f = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)
        f.writerow(class_prob)
        for i in range(num_class):
            f.writerow([])
            for j, data in enumerate(cond_prob[i]):
                f.writerow(data)
    # Info CSV: index rows (0..n-1) describing the label/value spaces.
    filename = outname + str(total+1) + '_info.csv'
    with open(filename, 'w') as csvfile:
        f = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)
        temp = []
        for i in range(len(class_prob)):
            temp.append(i)
        f.writerow(temp)
        for i in cond_prob[0]:
            temp = []
            for j in range(len(i)):
                temp.append(j)
            f.writerow(temp)
if __name__ == '__main__':
    import argparse
    # CLI: only the output file-name prefix is configurable.
    ap = argparse.ArgumentParser(description=u"train")
    ap.add_argument("-o", "--outname", default="sample", help="output name")
    args = ap.parse_args()
    run(args.outname)
| yamanalab/NB-Classify | training/createModel.py | createModel.py | py | 2,139 | python | en | code | 3 | github-code | 13 |
17808744416 |
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.text import one_hot
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import LSTM
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.utils import to_categorical
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
import tensorflow.keras.utils as ku
from sklearn.model_selection import train_test_split
import pandas as pd
#import nltk
from tensorflow.keras.callbacks import ModelCheckpoint
import csv
### DATA
data_url = "mi_dataset.csv"
# READ DATA
def read_csv_data(file):
    """Read the second column of every row of a comma-separated file.

    :param file: path to a CSV file whose rows carry the text in column 1.
    :return: list of the column-1 strings, in file order.
    """
    # newline='' is the csv-module-recommended way to open a CSV file so
    # that quoted embedded newlines are parsed correctly.
    with open(file, newline='') as csv_file:
        return [row[1] for row in csv.reader(csv_file, delimiter=',')]
sentences = read_csv_data(data_url)
### DATA PREPARING
# Fit a word-index tokenizer on the corpus, then encode every sentence as a
# sequence of word indices.  NOTE(review): Keras indices start at 1 (0 is the
# padding value), which is why the Embedding layer below is sized vocab_size+1.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(sentences)
train_sequences = tokenizer.texts_to_sequences(sentences)
vocab_size = len(tokenizer.word_counts)
# Longest encoded sentence; used as the padding length everywhere below.
max_length = max([len(x) for x in train_sequences])
def generate_padded_sequences(input_sequences):
    """Pre-pad sequences to the module-level max_length and split them into
    predictors (all tokens but the last) and one-hot labels (the last token).
    """
    padded = np.array(pad_sequences(input_sequences, maxlen=max_length, padding='pre'))
    predictors = padded[:, :-1]
    labels = ku.to_categorical(padded[:, -1], num_classes=vocab_size)
    return predictors, labels
x_full, y_full = generate_padded_sequences(train_sequences)
#SPLIT DATASET - 80% WE USE TO TRAIN MODEL, 20% TO TEST RESULTS
x_train, x_test = train_test_split(x_full, test_size=0.2, random_state=0)
y_train, y_test = train_test_split(y_full, test_size=0.2, random_state=0)
#MODEL SETTINGS
embedding_dim = 32
num_epochs = 30
learning_rate=0.01
#MODEL ARCHITECTURE
model_lstm = tf.keras.models.Sequential([
tf.keras.layers.Embedding(vocab_size+1, embedding_dim, input_length=max_length-1),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(vocab_size, activation='softmax')
])
model_lstm.compile(loss='categorical_crossentropy',optimizer='adam', metrics=['accuracy'])
#LOAD WEIGHTS/MODEL
model_lstm.load_weights("weights-improvement-30-3.5959.hdf5")
# Re-evaluate the model
loss, acc = model_lstm.evaluate(x_test, y_test, verbose=2)
print("Restored model, accuracy: {:5.2f}%".format(100 * acc))
#model_lstm.summary()
filepath="weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
#TRAINING
'''
history_lstm = model_lstm.fit(x_train, y_train,
validation_data=(x_test, y_test),
epochs=num_epochs,
verbose=2,
callbacks=callbacks_list
)
'''
#GENERATION TEXT
def generate_text(seed_text, num_words, model, max_sequence_len):
    """Append *num_words* model-predicted words to *seed_text* and return it.

    Each step re-encodes the growing text with the module-level tokenizer,
    pre-pads it to max_sequence_len-1, and takes the argmax of the model's
    softmax output as the next word index.
    """
    for _ in range(num_words):
        token_list = tokenizer.texts_to_sequences([seed_text])[0]
        token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
        predict_x = model.predict(token_list)
        predicted_id = int(np.argmax(predict_x, axis=1)[0])
        # O(1) reverse lookup via Tokenizer.index_word instead of scanning
        # the whole word_index dict for every generated word; index 0
        # (padding / unknown) maps to "", matching the original fallback.
        output_word = tokenizer.index_word.get(predicted_id, "")
        seed_text += " " + output_word
    return seed_text
# Sample generation: extend a fixed seed phrase by 10 predicted words.
seed_text = "even if "
new_text = generate_text(seed_text, 10, model_lstm, max_length)
print(new_text)
39585857108 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from http.server import CGIHTTPRequestHandler, HTTPServer
import os
HOST = ''
PORT = 8080
class RequestHandler(CGIHTTPRequestHandler):
    """CGI handler that runs POSTed scripts out of the local ``cmp/`` dir.

    do_POST temporarily chdirs into ``cmp/`` and rewrites the request path so
    the URL is resolved against that directory (a "fake chroot").
    NOTE(review): the chdir is process-global, so concurrent requests would
    race -- fine for the single-threaded HTTPServer used below.
    """
    # source in http://hg.python.org/cpython/file/3.3/Lib/http/server.py
    cgi_directories = ["/cmp/"]
    def do_POST(self):
        # set up "fake" chrooted CGI directory.
        # (assigning here creates an *instance* attribute that shadows the
        # class-level cgi_directories for the duration of this request)
        self.cgi_directories = ["/"]
        cdir = os.path.abspath(os.curdir)
        os.chdir('cmp/')
        # fake the path to the compiler: drop the first two '/'-separated
        # components, e.g. '/x/compile/extra' -> 'compile/extra'.
        self.path = self.path.split("/", 2)[2]
        # try to run the CGI program.
        CGIHTTPRequestHandler.do_POST(self)
        # restore.
        os.chdir(cdir)
        self.cgi_directories = ["/cmp/"]
# HOST='' binds every interface; serve requests until the process is killed.
httpd = HTTPServer((HOST, PORT), RequestHandler)
httpd.serve_forever()
| ingemaradahl/bilder-demo | server.py | server.py | py | 795 | python | en | code | 1 | github-code | 13 |
17597173134 | # x = input("첫번째 숫자를 입력 : ")
# y = input("두번째 숫자를 입력 : ")
# print("두 수의 곱은 ...")
# print(x * y)
# 위와 같이 연산을 진행할경우 오류가 발생!
# - 입력받은 수가 str 판정이기 때문. -
# 형변환을 위해서는 `int(변수)`, `str(변수)` 와 같이 자료형 뒤에 소괄호!
# x = int(input("첫번째 숫자를 입력 : "))
# y = int(input("두번째 숫자를 입력 : "))
# print("두 수의 곱은 ...")
# print(int(x) * int(y))
# 실습!
# 사용자로부터 배어난 년도를 입력받으면 현재 나이를 계산하여 출력할 것.
# Reference year for the age computation
todayYear = 2023
# Birth year typed by the user
userBirth = int(input("출생년도를 입력해주세요! : "))
# BUGFIX: the line below raises TypeError ("str" + int concatenation), which
# crashed the script before the corrected example further down could run.
# It is kept, commented out, as the "broken" half of the lesson:
# print("만 " + todayYear - userBirth + "세 이시네요!")
# 파이썬은 Java에서와 달리 `숫자 + 문자`로 출력하면 오류가 발생한다.
# 이를 해결하기 위하여 숫자값 앞에 str()을 붙이거나, 숫자로 된 문자값 앞에 int()를 붙여주어야 한다.
#
# Reference year for the age computation
todayYear = 2023
# Birth year typed by the user
userBirth = int(input("출생년도를 입력해주세요! : "))
# Correct version: wrap the arithmetic in str() before concatenating, since
# Python (unlike Java) does not implicitly convert int to str for '+'.
print("만 " + str(todayYear - userBirth) + "세 이시네요!")
24423139490 | import os
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .ui import resources_rc
# import the shotgun_fields module from the framework
shotgun_fields = sgtk.platform.import_framework(
"tk-framework-qtwidgets", "shotgun_fields")
# import the shotgun_globals module from shotgunutils framework
shotgun_globals = sgtk.platform.import_framework(
"tk-framework-shotgunutils", "shotgun_globals")
# import the shotgun_model module from shotgunutils framework
shotgun_model = sgtk.platform.import_framework(
"tk-framework-shotgunutils", "shotgun_model")
# import the views module from qtwidgets framework
views = sgtk.platform.import_framework(
"tk-framework-qtwidgets", "views")
class FieldWidgetDelegateDemo(QtGui.QWidget):
    """
    This widget shows a form for editing fields on an entity.
    """
    def __init__(self, parent=None):
        """
        Initialize the widget.

        UI population is deferred until the fields manager finishes its
        (asynchronous) initialization.
        """
        # call the base class init
        super(FieldWidgetDelegateDemo, self).__init__(parent)
        # the fields manager is used to query which fields are supported
        # for display. it can also be used to find out which fields are
        # visible to the user and editable by the user. the fields manager
        # needs time to initialize itself. once that's done, the widgets can
        # begin to be populated.
        self._fields_manager = shotgun_fields.ShotgunFieldManager(self)
        self._fields_manager.initialized.connect(self._populate_ui)
        self._fields_manager.initialize()
    def _populate_ui(self):
        """
        The fields manager has been initialized. Now we can requests some
        widgets to use in the UI.
        :return:
        """
        entity_type = "Project"
        # get a list of fields for the entity type
        # NOTE(review): the queried list is immediately overwritten by the
        # hard-coded three-field list below -- confirm that is intentional.
        fields = shotgun_globals.get_entity_fields(entity_type)
        fields = [
            "image",
            "name",
            "sg_description",
        ]
        # make sure the fields list only includes editable fields
        fields = [f for f in fields
                  if shotgun_globals.field_is_editable(entity_type, f)]
        # make sure the fields are supported by the fields manager
        fields = self._fields_manager.supported_fields(entity_type, fields)
        # we'll display all the fields we're querying in the model
        columns = fields
        # since we've filtered out only editable fields, we'll make those
        # editable within the model
        editable_columns = columns
        # ---- Here we create a ShotgunModel and a ShotgunTableView. The
        # ShotgunTableView will automatically create delegates for the
        # columns defined in the model, so you don't have to manually
        # create delegates yourself.
        auto_delegate_lbl = QtGui.QLabel(
            "A <tt>ShotgunTableView</tt> with auto-assigned field delegates:"
        )
        # create the table view
        self._auto_delegate_table = views.ShotgunTableView(self._fields_manager,
                                                           parent=self)
        self._auto_delegate_table.horizontalHeader().setStretchLastSection(True)
        # setup the model
        self._sg_model = shotgun_model.SimpleShotgunModel(self)
        self._sg_model.load_data(entity_type, fields=fields, columns=columns,
                                 editable_columns=editable_columns)
        self._auto_delegate_table.setModel(self._sg_model)
        # the sg model's first column always includes the entity code and
        # thumbnail. hide that column
        self._auto_delegate_table.hideColumn(0)
        # ---- Here we create our own QStandardItemModel and manually assign
        # delegates to it. This is useful if you would like to build an
        # interface to enter data that will eventually be used to create
        # or update Shotgun entities.
        manual_delegate_lbl = QtGui.QLabel(
            "A Standard <tt>QTableView</tt> with manually-assigned field delegates:"
        )
        self._manual_delegate_table = QtGui.QTableView(self)
        self._manual_delegate_table.horizontalHeader().setStretchLastSection(True)
        # get delegates for each of the columns to display
        image_delegate = self._fields_manager.create_generic_delegate(
            entity_type, "image", self._manual_delegate_table)
        name_delegate = self._fields_manager.create_generic_delegate(
            entity_type, "name", self._manual_delegate_table)
        desc_delegate = self._fields_manager.create_generic_delegate(
            entity_type, "sg_description", self._manual_delegate_table)
        # tell the delegates to get/set data via the display role rather than
        # the default SG model's associated data role. This allows the delegate
        # to be used with non-SG models.
        for delegate in [image_delegate, name_delegate, desc_delegate]:
            delegate.data_role = QtCore.Qt.DisplayRole
        # assign the delegates to the table columns
        self._manual_delegate_table.setItemDelegateForColumn(0, image_delegate)
        self._manual_delegate_table.setItemDelegateForColumn(1, name_delegate)
        self._manual_delegate_table.setItemDelegateForColumn(2, desc_delegate)
        # NOTE(review): the model is built with 3 empty rows; the two
        # insertRow() calls below add rows *above* them, so three blank rows
        # remain at the bottom -- confirm that is intended.
        self._standard_model = QtGui.QStandardItemModel(3, 3, self)
        self._standard_model.setHorizontalHeaderLabels(
            ["Thumbnail", "Name", "Description"])
        thumbnail_item = QtGui.QStandardItem()
        thumbnail_item.setData(
            QtGui.QPixmap( ":/tk_multi_demo_field_widget_delegate/project_1.png"),
            image_delegate.data_role
        )
        self._standard_model.insertRow(
            0,
            [
                thumbnail_item,
                QtGui.QStandardItem("New Project"),
                QtGui.QStandardItem("This is a project that could be created."),
            ]
        )
        # This second row stores a plain file-path string for the thumbnail;
        # presumably the image delegate accepts paths as well as pixmaps --
        # TODO confirm.
        self._standard_model.insertRow(
            1,
            [
                QtGui.QStandardItem(
                    os.path.join(os.path.dirname(__file__), "resources", "project_2.png")
                ),
                QtGui.QStandardItem("New Project2"),
                QtGui.QStandardItem("Another project example description."),
            ]
        )
        self._manual_delegate_table.setModel(self._standard_model)
        help_lbl = QtGui.QLabel(
            "* Double click fields to modify values <strong>(changes will not be saved)</strong>"
        )
        # and layout the dialog
        layout = QtGui.QVBoxLayout(self)
        layout.addWidget(auto_delegate_lbl)
        layout.addWidget(self._auto_delegate_table)
        layout.addWidget(manual_delegate_lbl)
        layout.addWidget(self._manual_delegate_table)
        layout.addWidget(help_lbl)
        self.setLayout(layout)
| ColinKennedy/tk-config-default2-respawn | bundle_cache/app_store/tk-multi-demo/v1.0.2/python/tk_multi_demo/demos/field_widget_delegate/demo.py | demo.py | py | 6,804 | python | en | code | 10 | github-code | 13 |
class Ship2(pygame.sprite.Sprite):
    """Player ship sprite: a 100x100 'ship.png' image centered at (400, 400),
    a velocity vector, and a hit-point counter decremented on enemy contact."""
    def __init__(self):
        super().__init__()
        self.movey = 100
        self.movex = 100
        self.image = pygame.transform.smoothscale(
            pygame.image.load('ship.png'), (100, 100))
        self.rect = self.image.get_rect()
        self.rect.center = (400, 400)
        self.speed = pygame.math.Vector2(0, 0)
        self.health = 10
    def update(self, enemyGroup):
        """Advance by the current speed, then lose one health point per
        touching enemy (printing the new total each time)."""
        self.rect.move_ip(self.speed)
        for _enemy in pygame.sprite.spritecollide(self, enemyGroup, False):
            self.health -= 1
            print(self.health)
40336501615 | from base.base_train import BaseTrain
from tqdm import tqdm
import numpy as np
from time import sleep
from time import time
from utils.evaluations import save_results
class Mark1_Trainer(BaseTrain):
    """Adversarial (ALAD-style) trainer: runs generator/discriminator epochs,
    optionally early-stops on a validation reconstruction error, then scores
    the test set and persists scaled anomaly scores via save_results()."""
    def __init__(self, sess, model, data, config, summarizer):
        super(Mark1_Trainer, self).__init__(sess, model, data, config, summarizer)
        # These values are cached as attributes because they are used a lot.
        self.batch_size = self.config.data_loader.batch_size
        self.noise_dim = self.config.trainer.noise_dim
        self.img_dims = self.config.trainer.image_dims
        # Initialize the train Dataset Iterator
        self.sess.run(self.data.iterator.initializer)
        # Initialize the test Dataset Iterator
        self.sess.run(self.data.test_iterator.initializer)
        if self.config.data_loader.validation:
            self.sess.run(self.data.valid_iterator.initializer)
            # NOTE(review): these early-stop counters only exist when
            # validation is enabled, yet train_epoch's early-stop branch uses
            # them -- confirm enable_early_stop implies validation.
            self.best_valid_loss = 0
            self.nb_without_improvements = 0
    def train_epoch(self):
        """Run one epoch: adversarial train steps, loss logging, optional
        validation/early-stop bookkeeping, then a test-set scoring pass."""
        begin = time()
        # Attach the epoch loop to a variable
        loop = tqdm(range(self.config.data_loader.num_iter_per_epoch))
        # Define the lists for summaries and losses
        gen_losses = []
        disc_losses = []
        disc_xz_losses = []
        disc_xx_losses = []
        disc_zz_losses = []
        summaries = []
        # Get the current epoch counter
        cur_epoch = self.model.cur_epoch_tensor.eval(self.sess)
        image = self.data.image
        for _ in loop:
            loop.set_description("Epoch:{}".format(cur_epoch + 1))
            loop.refresh() # to show immediately the update
            sleep(0.01)
            lg, ld, ldxz, ldxx, ldzz, sum_g, sum_d = self.train_step(image, cur_epoch)
            gen_losses.append(lg)
            disc_losses.append(ld)
            disc_xz_losses.append(ldxz)
            disc_xx_losses.append(ldxx)
            disc_zz_losses.append(ldzz)
            summaries.append(sum_g)
            summaries.append(sum_d)
        self.logger.info("Epoch {} terminated".format(cur_epoch))
        self.summarizer.add_tensorboard(step=cur_epoch, summaries=summaries)
        # Check for reconstruction
        if cur_epoch % self.config.log.frequency_test == 0:
            image_eval = self.sess.run(image)
            feed_dict = {self.model.image_input: image_eval, self.model.is_training: False}
            reconstruction = self.sess.run(self.model.sum_op_im, feed_dict=feed_dict)
            self.summarizer.add_tensorboard(step=cur_epoch, summaries=[reconstruction])
        # Get the means of the loss values to display
        gl_m = np.mean(gen_losses)
        dl_m = np.mean(disc_losses)
        dlxz_m = np.mean(disc_xz_losses)
        dlxx_m = np.mean(disc_xx_losses)
        dlzz_m = np.mean(disc_zz_losses)
        if self.config.trainer.allow_zz:
            self.logger.info(
                "Epoch {} | time = {} | loss gen = {:4f} |"
                "loss dis = {:4f} | loss dis xz = {:4f} | loss dis xx = {:4f} | "
                "loss dis zz = {:4f}".format(
                    cur_epoch, time() - begin, gl_m, dl_m, dlxz_m, dlxx_m, dlzz_m
                )
            )
        else:
            self.logger.info(
                "Epoch {} | time = {} | loss gen = {:4f} | "
                "loss dis = {:4f} | loss dis xz = {:4f} | loss dis xx = {:4f} | ".format(
                    cur_epoch, time() - begin, gl_m, dl_m, dlxz_m, dlxx_m
                )
            )
        # Save the model state
        # self.model.save(self.sess)
        if (
            cur_epoch + 1
        ) % self.config.trainer.frequency_eval == 0 and self.config.trainer.enable_early_stop:
            valid_loss = 0
            image_valid = self.sess.run(self.data.valid_image)
            feed_dict = {self.model.image_input: image_valid, self.model.is_training: False}
            vl = self.sess.run([self.model.rec_error_valid], feed_dict=feed_dict)
            valid_loss += vl[0]
            if self.config.log.enable_summary:
                sm = self.sess.run(self.model.sum_op_valid, feed_dict=feed_dict)
                self.summarizer.add_tensorboard(step=cur_epoch, summaries=[sm], summarizer="valid")
            self.logger.info("Validation: valid loss {:.4f}".format(valid_loss))
            # NOTE(review): best_valid_loss starts at 0, so the first save
            # relies on the frequency_eval fallback clause -- confirm intended.
            if (
                valid_loss < self.best_valid_loss
                or cur_epoch == self.config.trainer.frequency_eval - 1
            ):
                self.best_valid_loss = valid_loss
                self.logger.info(
                    "Best model - valid loss = {:.4f} - saving...".format(self.best_valid_loss)
                )
                # Save the model state
                self.model.save(self.sess)
                self.nb_without_improvements = 0
            else:
                self.nb_without_improvements += self.config.trainer.frequency_eval
            if self.nb_without_improvements > self.config.trainer.patience:
                self.patience_lost = True
                self.logger.warning(
                    "Early stopping at epoch {} with weights from epoch {}".format(
                        cur_epoch, cur_epoch - self.nb_without_improvements
                    )
                )
        self.logger.warn("Testing evaluation...")
        scores = []
        inference_time = []
        true_labels = []
        # Create the scores
        test_loop = tqdm(range(self.config.data_loader.num_iter_per_test))
        for _ in test_loop:
            test_batch_begin = time()
            test_batch, test_labels = self.sess.run([self.data.test_image, self.data.test_label])
            test_loop.refresh() # to show immediately the update
            sleep(0.01)
            feed_dict = {self.model.image_input: test_batch, self.model.is_training: False}
            scores += self.sess.run(self.model.score, feed_dict=feed_dict).tolist()
            inference_time.append(time() - test_batch_begin)
            true_labels += test_labels.tolist()
        true_labels = np.asarray(true_labels)
        inference_time = np.mean(inference_time)
        self.logger.info("Testing: Mean inference time is {:4f}".format(inference_time))
        scores = np.asarray(scores)
        # Min-max scale the anomaly scores into [0, 1] before saving.
        scores_scaled = (scores - min(scores)) / (max(scores) - min(scores))
        step = self.sess.run(self.model.global_step_tensor)
        save_results(
            self.config.log.result_dir,
            scores_scaled,
            true_labels,
            self.config.model.name,
            self.config.data_loader.dataset_name,
            "fm",
            "paper",
            self.config.trainer.label,
            self.config.data_loader.random_seed,
            self.logger,
            step,
        )
    def train_step(self, image, cur_epoch):
        """
        implement the logic of the train step
        - run the tensorflow session
        - return any metrics you need to summarize
        """
        true_labels, generated_labels = self.generate_labels(
            self.config.trainer.soft_labels, self.config.trainer.flip_labels
        )
        # Train the discriminator
        image_eval = self.sess.run(image)
        feed_dict = {
            self.model.image_input: image_eval,
            self.model.generated_labels: generated_labels,
            self.model.true_labels: true_labels,
            self.model.is_training: True,
        }
        _, _, _, ld, ldxz, ldxx, ldzz, sm_d = self.sess.run(
            [
                self.model.train_dis_op_xz,
                self.model.train_dis_op_xx,
                self.model.train_dis_op_zz,
                self.model.loss_discriminator,
                self.model.dis_loss_xz,
                self.model.dis_loss_xx,
                self.model.dis_loss_zz,
                self.model.sum_op_dis,
            ],
            feed_dict=feed_dict,
        )
        # Train Generator
        # (fresh labels are drawn so the generator sees its own noise draw)
        true_labels, generated_labels = self.generate_labels(
            self.config.trainer.soft_labels, self.config.trainer.flip_labels
        )
        feed_dict = {
            self.model.image_input: image_eval,
            self.model.generated_labels: generated_labels,
            self.model.true_labels: true_labels,
            self.model.is_training: True,
        }
        _, lg, sm_g = self.sess.run(
            [self.model.train_gen_op, self.model.gen_loss_total, self.model.sum_op_gen],
            feed_dict=feed_dict,
        )
        return lg, ld, ldxz, ldxx, ldzz, sm_g, sm_d
    def generate_labels(self, soft_labels, flip_labels):
        """Build (true, generated) label batches of shape (batch_size, 1).

        With soft_labels, labels are jittered within 0.1 of 1/0; flip_labels
        swaps the two batches (label flipping).
        """
        if not soft_labels:
            true_labels = np.ones((self.config.data_loader.batch_size, 1))
            generated_labels = np.zeros((self.config.data_loader.batch_size, 1))
        else:
            generated_labels = np.zeros(
                (self.config.data_loader.batch_size, 1)
            ) + np.random.uniform(low=0.0, high=0.1, size=[self.config.data_loader.batch_size, 1])
            # flipped_idx = np.random.choice(
            #     np.arange(len(generated_labels)),
            #     size=int(self.config.trainer.noise_probability * len(generated_labels)),
            # )
            # generated_labels[flipped_idx] = 1 - generated_labels[flipped_idx]
            true_labels = np.ones((self.config.data_loader.batch_size, 1)) - np.random.uniform(
                low=0.0, high=0.1, size=[self.config.data_loader.batch_size, 1]
            )
            # flipped_idx = np.random.choice(
            #     np.arange(len(true_labels)),
            #     size=int(self.config.trainer.noise_probability * len(true_labels)),
            # )
            # true_labels[flipped_idx] = 1 - true_labels[flipped_idx]
        if flip_labels:
            return generated_labels, true_labels
        else:
            return true_labels, generated_labels
| yigitozgumus/Polimi_Thesis | trainers/mark1_trainer.py | mark1_trainer.py | py | 9,888 | python | en | code | 5 | github-code | 13 |
73275285136 | from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.server import SimpleXMLRPCRequestHandler
import random
import hashlib
# Restrict to a particular path.
class RequestHandler(SimpleXMLRPCRequestHandler) :
    """Request handler that only accepts XML-RPC calls on the /RPC2 path."""
    rpc_paths = ('/RPC2',)
# Create server
server = SimpleXMLRPCServer(("localhost", 8000),
                            requestHandler=RequestHandler, allow_none=True)
server.register_introspection_functions()
# something to say about this!:
# Module-level session state: one login flag per known user, plus the last
# challenge number handed out by genRandnumber() (None until the first call).
omarIsLoggedIn = False
maxIsLoggedIn = False
jokerIsLoggedIn = False
randomNum = None
# getPasswordByUsername() method will return password based on available user
def getPasswordByUsername(username):
    """Look up *username* in users.txt ("user|password" per line).

    :return: the stored password, or None when the user is unknown.
    """
    # 'with' closes the handle even on the early return (the original
    # leaked the open file).
    with open("users.txt", "r") as f:
        for account in f:
            line = account.strip()
            if not line:
                continue  # tolerate blank lines
            us, pw = line.split("|", 1)
            # BUGFIX: exact comparison; the original substring test
            # ('username in us') matched partial names, e.g. 'oma' -> 'omar'.
            if us == username:
                return pw
    return None
# genRandnumber generates random number that will be stored afterwards:
def genRandnumber():
    """Draw a fresh challenge number in [0, 9999] and remember it globally."""
    global randomNum
    value = random.randint(0, 9999)
    randomNum = value
    return value
server.register_function(genRandnumber)
# serverHasher generates hash from the server side:
def serverHasher(username):
    """SHA-256 hex digest of the user's stored password concatenated with
    the current challenge number (module-level randomNum)."""
    password = getPasswordByUsername(username)
    digest_input = (password + str(randomNum)).encode('utf-8')
    return hashlib.sha256(digest_input).hexdigest()
server.register_function(serverHasher)
# compareHashes checks if the returned hash from
# the server is equal to the hash from the client:
def compareHashes(hashValue, username):
    """Validate the client's challenge-response hash.

    On a match, set the matching user's login flag and return
    'login succeeded'; otherwise return 'login failed'.
    """
    global omarIsLoggedIn
    global maxIsLoggedIn
    global jokerIsLoggedIn
    if serverHasher(username) != hashValue:
        return 'login failed'
    if username == "omar":
        omarIsLoggedIn = True
    elif username == "max":
        maxIsLoggedIn = True
    elif username == "joker":
        jokerIsLoggedIn = True
    return 'login succeeded'
server.register_function(compareHashes)
def userIsLoggedIn(username):
    """Return the login flag for *username*, or None for unknown users."""
    print(username)  # debug trace kept to preserve the original output
    flags = {"omar": omarIsLoggedIn, "max": maxIsLoggedIn, "joker": jokerIsLoggedIn}
    return flags.get(username)
###
# Below are some methods to use once the user successfully login
###
# method that adds two numbers
def add(x, y, username):
    """Return x + y for a logged-in user, else a refusal message.

    userIsLoggedIn is deliberately called twice (once for the debug print,
    once for the check), matching the original's console output.
    """
    print(x, y, username)
    print(userIsLoggedIn(username))
    return x + y if userIsLoggedIn(username) else "your not logged in!"
server.register_function(add, 'add')
# method that substract a numbers from another
def subtract(x, y, username):
    """Return x - y for a logged-in user, else a refusal message."""
    print(x, y, username)
    print(userIsLoggedIn(username))
    return x - y if userIsLoggedIn(username) else "your not logged in!"
server.register_function(subtract)
# method that multiply two numbers
def multiply(x, y, username):
    """Return x * y for a logged-in user, else a refusal message."""
    print(x, y, username)
    print(userIsLoggedIn(username))
    return x * y if userIsLoggedIn(username) else "your not logged in!"
server.register_function(multiply)
# method that divides one number by another
def divide(x, y, username) :
    """Floor-divide x by y for a logged-in user, else return an error string.

    NOTE(review): `//` is floor division (3 // 2 == 1); confirm true
    division (`/`) was not intended.  A zero divisor raises
    ZeroDivisionError here — presumably surfaced to the client as an
    RPC fault; verify that is acceptable.
    """
    print(x,y,username)
    print(userIsLoggedIn(username))
    if(userIsLoggedIn(username)):
        return x // y
    else:
        return "your not logged in!"
server.register_function(divide)
# Run the server's main loop
server.serve_forever() | gitfarah/CHAP_Paython- | server.py | server.py | py | 3,263 | python | en | code | 0 | github-code | 13 |
34072180440 | # solved in 5m
# Read a group size and a digit string; print the parity bit of each
# length-n group, or ERROR when the string does not divide evenly.
group_size = int(input())
digits = input()
if len(digits) % group_size != 0:
    print("ERROR")
    exit()
parity_bits = (str(sum(map(int, digits[pos:pos + group_size])) % 2)
               for pos in range(0, len(digits), group_size))
print("".join(parity_bits), end="")
print(bin(sum(map(int,s[i:i+n])))[-1],end="")
| Elod-T/codingame | clash of code/fastest/parityOfSumsInGroupsInBinary.py | parityOfSumsInGroupsInBinary.py | py | 176 | python | en | code | 0 | github-code | 13 |
15391300596 | # -*- coding:utf-8 -*-
# update for BALDR HEART EXE 2017.09.14
import struct
import os
import sys
import io
def byte2int(byte):
    """Decode 4 little-endian bytes into an unsigned integer.

    Uses the standard-size '<L' format (always 4 bytes).  The original
    native 'L' format is platform-sized — 8 bytes on 64-bit Unix — which
    made every `src.read(4)` call in this script fail with struct.error
    anywhere but Windows.
    """
    return struct.unpack('<L', byte)[0]
def int2byte(num):
    """Encode an unsigned integer as 4 little-endian bytes (inverse of byte2int).

    '<L' is the standard-size 4-byte format; the native 'L' used before
    produced 8 bytes on 64-bit Unix, corrupting the 32-bit file format.
    """
    return struct.pack('<L', num)
def FormatString(string, count):
    """Render one numbered translation entry: an original (○) line pair and
    an identical editable (●) pair, separated by a blank line."""
    original_marker = "○%08d○" % count
    edited_marker = "●%08d●" % count
    return "%s\n%s\n%s\n%s\n\n" % (original_marker, string, edited_marker, string)
def ReadPart1(src):
    """Read five consecutive NUL-terminated strings from *src*."""
    return [dumpstr(src) for _ in range(5)]
def ReadPart2(src,count):
    """Read `count` dialogue records, each contributing two strings.

    Record layout: a 4-byte flag, then twice (NUL-terminated string +
    5 bytes of metadata), then a 104-byte fixed tail.  A zero flag marks
    an empty record, which is logged and skipped entirely (including its
    tail — NOTE(review): confirm empty records really have no tail).
    """
    str_list = []
    for i in range(count):
        buff = byte2int(src.read(4))
        if buff == 0:
            # empty record: report file position for debugging and skip
            print(src,hex(src.tell()))
            continue
        for k in range(2):
            l=dumpstr(src)
            str_list.append(l)
            src.seek(5,os.SEEK_CUR)  # skip per-string metadata bytes
        src.seek(104,os.SEEK_CUR)  # skip the fixed-size record tail
    return str_list
def Sou_ReadPart2(src,count):
    """Hand-patched variant of ReadPart2 for the malformed sou._mek file.

    Same record layout as ReadPart2, but sou._mek contains irregular
    padding runs; the hard-coded offsets below were found empirically for
    this one file and resynchronise the stream when a zero flag is hit.
    Unknown positions drop into the `else` branch, which pauses so the
    new offset can be added by hand.
    """
    str_list = []
    for i in range(count):
        buff = byte2int(src.read(4))
        if buff == 0:
            buff = byte2int(src.read(4))
            # skip runs of 0xFFFFFFFF-tagged 28-byte filler entries
            while buff == 0xFFFFFFFF:
                src.seek(24,os.SEEK_CUR)
                buff = byte2int(src.read(4))
            if buff != 0:
                src.seek(-4,os.SEEK_CUR)  # not filler: rewind and parse normally
            else:
                # still zero: apply the per-position fix-up discovered by hand
                if src.tell() == 0xb0f or src.tell() == 0xc2a or src.tell() == 0x1da3 or src.tell() == 0x3628 or src.tell() == 0x3722 or src.tell() == 0x8833 or src.tell() == 0x1656 or src.tell() == 0x221c or src.tell() == 0x3e5f or src.tell() == 0x3f59:
                    src.seek(0x50,os.SEEK_CUR)
                elif src.tell() == 0x8009:
                    src.seek(0x4,os.SEEK_CUR)
                elif src.tell() == 0x8778:
                    src.seek(0x14,os.SEEK_CUR)
                elif src.tell() == 0xd23:
                    src.seek(0x34,os.SEEK_CUR)
                elif src.tell() == 0x664a or src.tell() == 0x764b:
                    src.seek(0x4c,os.SEEK_CUR)
                elif src.tell() == 0x1c7b or src.tell() == 0x8926 or src.tell() == 0x1749 or src.tell() == 0x20f4:
                    src.seek(0x88,os.SEEK_CUR)
                elif src.tell() == 0x4ed1 or src.tell() == 0x5ed2:
                    src.seek(0xac,os.SEEK_CUR)
                elif src.tell() == 0x900a:
                    # src.seek(0x4,os.SEEK_CUR)
                    break  # end of usable data in this file
                else:
                    # unrecognised position: report it and stop this section
                    print(hex(src.tell()))
                    os.system('pause')
                    break
        for k in range(2):
            l = dumpstr(src)
            str_list.append(l)
            src.seek(5,os.SEEK_CUR)  # skip per-string metadata bytes
        src.seek(104,os.SEEK_CUR)  # skip the fixed-size record tail
    return str_list
def ReadPart3(src):
    """Consume a 4-byte header (value unused) and read two NUL-terminated
    strings from *src*."""
    byte2int(src.read(4))  # header dword; original discarded it too
    return [dumpstr(src) for _ in range(2)]
def ReadPart4(src):
    """Read two counted string tables followed by one trailing string."""
    strings = []
    for _ in range(2):
        entry_count = byte2int(src.read(4))
        strings.extend(dumpstr(src) for _ in range(entry_count))
    strings.append(dumpstr(src))
    return strings
def dumpstr(src):
    """Read a NUL-terminated string at the current position of *src* and
    decode it as cp932 (the Shift-JIS superset used by the game scripts).

    Raises
    ------
    EOFError
        If the stream ends before a NUL terminator is found.  The original
        loop compared each chunk to b'\\x00' only, so an EOF (empty read)
        spun forever on a truncated file.
    """
    raw = bytearray()
    while True:
        c = src.read(1)
        if c == b'\x00':
            break
        if not c:  # EOF before terminator: file is truncated/corrupt
            raise EOFError('unterminated string at end of stream')
        raw.extend(c)
    return raw.decode('932')
# Extract every translatable string from each ._mek script in Dat/ and
# dump it to a UTF-16 text file in the paired ○/● translation format.
for f in os.listdir('Dat'):
    if not f.endswith('._mek'):
        continue
    src = open('Dat/'+f,'rb')
    src.seek(0, os.SEEK_END)
    filesize = src.tell()  # NOTE(review): computed but never used
    src.seek(0, os.SEEK_SET)
    # Section 1: dword at offset 0 points to a block of 5 strings
    str_offset = byte2int(src.read(4))
    src.seek(str_offset)
    all_str = []
    all_str = ReadPart1(src)
    dstname = 'Dat/' + f[:-5] + '.txt'
    dst = open(dstname, 'w', encoding='utf16')
    i = 0
    for string in all_str:
        dst.write(FormatString(string, i))
        i += 1
    # Section 2: dword at offset 8 points to the dialogue records;
    # sou._mek is malformed and needs the hand-patched reader
    src.seek(8, os.SEEK_SET)
    str_offset = byte2int(src.read(4))
    src.seek(str_offset)
    count = byte2int(src.read(4))
    if dstname != 'Dat/sou.txt':
        all_str = ReadPart2(src,count)
    else:
        all_str = Sou_ReadPart2(src,226)
    for string in all_str:
        dst.write(FormatString(string, i))
        i += 1
    # Section 3: dword at offset 12 points to a two-string block
    src.seek(12, os.SEEK_SET)
    str_offset = byte2int(src.read(4))
    src.seek(str_offset)
    all_str = ReadPart3(src)
    for string in all_str:
        dst.write(FormatString(string, i))
        i += 1
    # Section 4: dword at offset 16; a 0xFFFFFFFF flag means "section absent"
    src.seek(16, os.SEEK_SET)
    str_offset = byte2int(src.read(4))
    src.seek(str_offset)
    buff = byte2int(src.read(4))
    if buff != 0xFFFFFFFF:
        all_str = ReadPart4(src)
        for string in all_str:
            dst.write(FormatString(string, i))
            i += 1
    src.close()
    dst.close()
33659007012 | from jinja2 import Environment, FileSystemLoader
import json, subprocess, time
def runScript():
    """Run the scraper's npm script, which regenerates data.json on disk."""
    subprocess.call("npm run script --prefix handong-newsletter-script", shell=True)
def todayDate():
    """Return [weekday, date] for today, e.g. ['월요일', '2024.01.01'].

    strftime('%w') yields 0 (Sunday) .. 6 (Saturday), which indexes the
    Korean weekday characters below; '요일' is appended to form the name.
    """
    weekday_names = "일월화수목금토"
    weekday = weekday_names[int(time.strftime("%w"))] + "요일"
    return [weekday, time.strftime("%Y.%m.%d")]
def render_html():
    """Run the scraper, load its JSON output, and render the newsletter HTML."""
    runScript()
    with open("handong-newsletter-script/data.json", encoding="utf8") as data_file:
        data = json.load(data_file)
    food_data = data["food"]
    # Anonymous posts sorted by view count, most viewed first
    anon_sorted = sorted(data["anon"], key=lambda entry: entry["view"], reverse=True)
    hot_posts = anon_sorted[:5]
    other_posts = anon_sorted[5:]
    environment = Environment(
        loader=FileSystemLoader(".")
    )
    template = environment.get_template("template/index.jinja")
    return template.render(date=todayDate(), food=food_data, anon_hot=hot_posts, anon_rest=other_posts)
22220877029 | import io
from typing import Union
import pytesseract
import streamlit as st
from PIL import Image
from src.models import Receipt
def preprocess_parsed_text(parsed_text: str) -> str:
    """Normalize raw OCR output by trimming surrounding whitespace."""
    cleaned = parsed_text.strip()
    return cleaned
@st.cache
def parse_cropped_image(cropped_image: Image, psm: int) -> str:
    """OCR the cropped receipt image with Tesseract and return cleaned text.

    `psm` is Tesseract's page-segmentation mode (4 or 6 in this app).
    Cached by Streamlit so re-runs with the same image/psm skip the OCR.
    """
    if cropped_image.format != "JPEG":
        # Make sure the image type is something Tesseract can handle
        cropped_image = cropped_image.convert("RGB")
    parsed_text = pytesseract.image_to_string(
        cropped_image, config=rf"-l eng --psm {psm}"
    )
    parsed_text = preprocess_parsed_text(parsed_text)
    return parsed_text
def parse_and_clean(receipt_image: io.BytesIO) -> Union[Receipt, None]:
    """Streamlit flow: OCR a receipt, let the user correct the text, and
    validate the parsed total against a user-entered bill total.

    Returns the parsed Receipt when the totals match, otherwise None
    (including the initial render, before a total has been entered).
    """
    # Get total price to validate that scan is correct
    total_price_container = st.container()
    with total_price_container:
        total_bill_cost = st.text_input("Please enter bill total below:")
    if total_bill_cost:
        total_bill_cost = float(total_bill_cost)
        # This should be False by default, usually OCR works better
        # for receipts with psm=4
        alternate_psm = st.checkbox(
            "Alternate OCR PSM",
            help="Toggle this setting if the extracted text is poor quality.",
        )
        psm = 6 if alternate_psm else 4
        parsed_text = parse_cropped_image(receipt_image, psm=psm)
        # Editable text area so the user can fix OCR mistakes by hand
        cleaned_text = st.text_area(
            "Parsed Text, please fix - line entry names should be unique.",
            parsed_text,
            height=500,
        )
        receipt = Receipt.parse_receipt_text(cleaned_text)
        # Render the match/mismatch verdict next to the total input above
        with total_price_container:
            if receipt.total_cost != total_bill_cost:
                st.markdown(
                    f"""
                ## Wait! ⚠️
                **The bill total provided, £{total_bill_cost:.2f}, does not \
                match the total bill amount below, £{receipt.total_cost:.2f}.**
                *Please fix the scanned text manually to ensure they match.*"""
                )
                return
            else:
                st.markdown(
                    """
                ## Looks good! ✅
                *The bill total provided matches the bill total scanned, \
                proceed to the Friends tab.*"""
                )
        return receipt
| lscholtes/billSplit | src/scan.py | scan.py | py | 2,349 | python | en | code | 0 | github-code | 13 |
23444189413 | # -*- coding:utf-8 -*-
from sklearn.datasets import load_breast_cancer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Load the breast cancer dataset
cancer = load_breast_cancer()
X_train,X_test,y_train,y_test = train_test_split(cancer.data,cancer.target,
                                                 stratify=cancer.target,random_state=66)
training_accuracy = []  # training-set accuracy for each k
test_accuracy = []  # test-set (generalization) accuracy for each k
neighbors_settings = range(1,11)
# Train one k-NN classifier per neighbour count and record both accuracies
for n_neightbors in neighbors_settings:
    clf = KNeighborsClassifier(n_neighbors=n_neightbors)
    clf.fit(X_train, y_train)
    # record training-set accuracy
    training_accuracy.append(clf.score(X_train,y_train))
    # record generalization (test-set) accuracy
    test_accuracy.append(clf.score(X_test,y_test))
# Plot accuracy vs. number of neighbours to show the bias/variance trade-off
plt.plot(neighbors_settings, training_accuracy, label='training accuracy')
plt.plot(neighbors_settings, test_accuracy, label='test accuracy')
plt.ylabel('Accuracy')
plt.xlabel('n_neighbors')
plt.legend()
plt.show()
| liuaichao/python-work | 机器学习/k近邻模型/乳腺癌/ruxian.py | ruxian.py | py | 1,040 | python | en | code | 6 | github-code | 13 |
17669402330 | #TC = O(N) and SC = O(N)
class Solution(object):
    """Connect each node's `next` pointer to its right sibling on the same
    level of a binary tree (LeetCode "connect level-order siblings").

    BFS level-order traversal: within each level, the previously dequeued
    node is linked to the current one; the last node of a level keeps
    next = None.  Time O(N), space O(N) for the queue.
    """

    def connect(self, root):
        """Link siblings in place and return the (possibly None) root."""
        if root is None:
            return root
        queue = deque([root])
        while queue:
            level_len = len(queue)
            prev_node = None
            for level_index in range(level_len):
                cur_node = queue.popleft()
                if level_index == level_len - 1:
                    # Fixed: original wrote `cur_node.next == None`, a no-op
                    # comparison where an assignment was intended.
                    cur_node.next = None
                if prev_node:
                    prev_node.next = cur_node
                prev_node = cur_node
                if cur_node.left:
                    queue.append(cur_node.left)
                if cur_node.right:
                    queue.append(cur_node.right)
        return root
8576085872 | """
Base rule optimiser class. Main rule optimisers classes inherit from this one.
"""
from iguanas.rules import Rules
import iguanas.utils as utils
from iguanas.utils.typing import PandasDataFrameType, PandasSeriesType
from iguanas.utils.types import NumpyArray, PandasDataFrame, PandasSeries
from iguanas.warnings import RulesNotOptimisedWarning
from iguanas.exceptions import RulesNotOptimisedError
from typing import Callable, Dict, List, Set, Tuple
import pandas as pd
import numpy as np
import warnings
from copy import deepcopy
import matplotlib.pyplot as plt
import seaborn as sns
class _BaseOptimiser(Rules):
    """
    Base rule optimiser class. Main rule optimiser classes inherit from this
    one.

    Parameters
    ----------
    rule_lambdas : Dict[str, Callable[[Dict], str]]
        Set of rules defined using the standard Iguanas lambda expression
        format (values) and their names (keys).
    lambda_kwargs : Dict[str, Dict[str, float]]
        For each rule (keys), a dictionary containing the features used in the
        rule (keys) and the current values (values).
    metric : Callable
        The optimisation function used to calculate the metric which the rules
        are optimised for (e.g. F1 score).
    num_cores : int, optional
        The number of cores to use when optimising the rule thresholds.
        Defaults to 1.
    verbose : int, optional
        Controls the verbosity - the higher, the more messages.

    Attributes
    ----------
    rule_strings : Dict[str, str]
        The optimised + unoptimisable (but applicable) rules, defined using the
        standard Iguanas string format (values) and their names (keys).
    rule_lambdas : Dict[str, object]
        The optimised rules + unoptimisable (but applicable), defined using the
        standard Iguanas lambda expression format (values) and their names
        (keys).
    lambda_kwargs : Dict[str, object]
        The keyword arguments for the optimised + unoptimisable (but
        applicable) rules defined using the standard Iguanas lambda expression
        format.
    rules : Rules
        The Rules object containing the optimised + unoptimisable (but
        applicable) rules.
    rule_names : List[str]
        The names of the optimised + unoptimisable (but applicable) rules.
    rule_names_missing_features : List[str]
        Names of rules which use features that are not present in the dataset
        (and therefore can't be optimised or applied).
    rule_names_no_opt_conditions : List[str]
        Names of rules which have no optimisable conditions (e.g. rules that
        only contain string-based conditions).
    rule_names_zero_var_features : List[str]
        Names of rules which exclusively contain zero variance features (based
        on `X`), so cannot be optimised.
    opt_rule_performances : Dict[str, float]
        The optimisation metric (values) calculated for each optimised rule
        (keys).
    orig_rule_performances : Dict[str, float]
        The optimisation metric (values) calculated for each original rule
        (keys).
    non_optimisable_rules : Rules
        A `Rules` object containing the rules which contained exclusively
        non-optimisable conditions.
    zero_variance_rules : Rules
        A `Rules` object containing the rules which contained exclusively zero
        variance features.
    """

    def __init__(self,
                 rule_lambdas: Dict[str, Callable[[Dict], str]],
                 lambda_kwargs: Dict[str, Dict[str, float]],
                 metric: Callable,
                 num_cores: int,
                 verbose: int):
        Rules.__init__(self)
        # Keep the caller's originals untouched; optimisation works on copies.
        self.orig_rule_lambdas = rule_lambdas
        self.orig_lambda_kwargs = lambda_kwargs
        self.metric = metric
        self.num_cores = num_cores
        self.verbose = verbose
        self.rules = Rules()

    def fit_transform(self,
                      X: PandasDataFrameType,
                      y=None,
                      sample_weight=None) -> PandasDataFrameType:
        """
        Same as `.fit()` method - ensures rule optimiser conforms to
        fit/transform methodology.

        Parameters
        ----------
        X : PandasDataFrameType
            The feature set.
        y : PandasSeriesType, optional
            The binary target column. Not required if optimising rules on
            unlabelled data. Defaults to None.
        sample_weight : PandasSeriesType, optional
            Record-wise weights to apply. Defaults to None.

        Returns
        -------
        PandasDataFrameType
            The binary columns of the optimised + unoptimisable (but
            applicable) rules on the fitted dataset.
        """
        return self.fit(X=X, y=y, sample_weight=sample_weight)

    # NOTE(review): declared @classmethod but the first parameter is named
    # `self` (it receives the class object); works when called on the class.
    @classmethod
    def plot_performance_uplift(self,
                                orig_rule_performances: Dict[str, float],
                                opt_rule_performances: Dict[str, float],
                                figsize=(20, 10)) -> sns.scatterplot:
        """
        Generates a scatterplot showing the performance of each rule before
        and after optimisation.

        Parameters
        ----------
        orig_rule_performances : Dict[str, float]
            The performance metric of each rule prior to optimisation.
        opt_rule_performances : Dict[str, float]
            The performance metric of each rule after optimisation.
        figsize : tuple, optional
            The width and height of the scatterplot. Defaults to (20, 10).

        Returns
        -------
        sns.scatterplot
            Compares the performance of each rule before and after optimisation.
        """
        performance_comp, _ = self._calculate_performance_comparison(
            orig_rule_performances=orig_rule_performances,
            opt_rule_performances=opt_rule_performances
        )
        sns.set_style("whitegrid")
        plt.figure(figsize=figsize)
        sns.scatterplot(x=list(performance_comp.index),
                        y=performance_comp['OriginalRule'], color='blue', label='Original rule')
        sns.scatterplot(x=list(performance_comp.index),
                        y=performance_comp['OptimisedRule'], color='red', label='Optimised rule')
        plt.title(
            'Performance comparison of original rules vs optimised rules')
        plt.xticks(rotation=90)
        plt.ylabel('Performance (of the provided optimisation metric)')
        plt.show()

    # NOTE(review): same @classmethod-with-`self` pattern as above.
    @classmethod
    def plot_performance_uplift_distribution(self,
                                             orig_rule_performances: Dict[str, float],
                                             opt_rule_performances: Dict[str, float],
                                             figsize=(8, 10)) -> sns.boxplot:
        """
        Generates a boxplot showing the distribution of performance uplifts
        (original rules vs optimised rules).

        Parameters
        ----------
        orig_rule_performances : Dict[str, float]
            The performance metric of each rule prior to optimisation.
        opt_rule_performances : Dict[str, float]
            The performance metric of each rule after optimisation.
        figsize : tuple, optional
            The width and height of the boxplot. Defaults to (20, 10).

        Returns
        -------
        sns.boxplot
            Shows the distribution of performance uplifts (original rules vs optimised rules).
        """
        _, performance_difference = self._calculate_performance_comparison(
            orig_rule_performances=orig_rule_performances,
            opt_rule_performances=opt_rule_performances
        )
        sns.set_style("whitegrid")
        plt.figure(figsize=figsize)
        sns.boxplot(y=performance_difference)
        plt.title(
            'Distribution of performance uplift, original rules vs optimised rules')
        plt.xticks(rotation=90)
        plt.ylabel(
            'Performance uplift (of the provided optimisation metric)')
        plt.show()

    def _prepare_rules_for_opt(self,
                               X: PandasDataFrameType,
                               y: PandasSeriesType,
                               sample_weight: PandasSeriesType) -> Tuple[
                                   PandasSeriesType,
                                   PandasSeriesType,
                                   PandasDataFrameType]:
        """
        Performs the following before rule optimisation can take place:

        1. Checks if any rules contain features missing in `X` - if so,
        these rules are dropped.
        2. Checks for rules that exclusively contain non-optimisable
        conditions - if so, these rules are not optimised (but are added
        to the final rule set).
        3. Checks for rules that exclusively contain zero variance features
        - if so, these rules are not optimised (but are added to the final
        rule set).
        4. Creates the `Rules` object `optimisable_rules` - these are the
        rules that are used in the optimisation process.

        Returns the per-feature min and max of `X` and the binary columns
        of the original optimisable rules applied to `X`.
        """
        utils.check_allowed_types(X, 'X', [PandasDataFrame])
        if y is not None:
            utils.check_allowed_types(y, 'y', [PandasSeries])
        if sample_weight is not None:
            utils.check_allowed_types(
                sample_weight, 'sample_weight', [PandasSeries])
        utils.check_duplicate_cols(X, 'X')
        # Work on copies so the caller's rule definitions stay untouched
        self.orig_rules = Rules(
            rule_lambdas=self.orig_rule_lambdas.copy(),
            lambda_kwargs=self.orig_lambda_kwargs.copy(),
        )
        _ = self.orig_rules.as_rule_strings(as_numpy=False)
        if self.verbose > 0:
            print(
                '--- Checking for rules with features that are missing in `X` ---'
            )
        self.rule_names_missing_features, rule_features_in_X = self._return_rules_missing_features(
            rules=self.orig_rules,
            columns=X.columns,
            verbose=self.verbose
        )
        # If there are rules with missing features in `X`, drop these rules
        if self.rule_names_missing_features:
            self.orig_rules.filter_rules(
                exclude=self.rule_names_missing_features
            )
        # Filter `X` to rule features
        X = X[rule_features_in_X]
        if self.verbose > 0:
            print(
                '--- Checking for rules that exclusively contain non-optimisable conditions ---'
            )
        # Return rules with no optimisable conditions (e.g. categorical)
        self.rule_names_no_opt_conditions = self._return_all_optimisable_rule_features(
            lambda_kwargs=self.orig_rules.lambda_kwargs,
            verbose=self.verbose
        )
        # Get set of features (values) for each rule (keys)
        rule_features = self.orig_rules.get_rule_features()
        # Get set of features used in whole rule set
        rule_features_set = set().union(*self.orig_rules.get_rule_features().values())
        # Get min, max of `X`
        X_min, X_max = self._return_X_min_max(
            X=X,
            cols=rule_features_set
        )
        if self.verbose > 0:
            print(
                '--- Checking for rules that exclusively contain zero-variance features ---'
            )
        # Return rules with exclusively zero variance features
        self.rule_names_zero_var_features = self._return_rules_with_zero_var_features(
            rule_features=rule_features,
            rule_names=list(self.orig_rules.rule_lambdas.keys()),
            X_min=X_min,
            X_max=X_max,
            rule_names_no_opt_conditions=self.rule_names_no_opt_conditions,
            verbose=self.verbose
        )
        # Generate optimisable, non-optimisable and zero-variance rule sets
        self.optimisable_rules, self.non_optimisable_rules, self.zero_variance_rules = self._return_optimisable_rules(
            rules=self.orig_rules,
            rule_names_no_opt_conditions=self.rule_names_no_opt_conditions,
            rule_names_zero_var_features=self.rule_names_zero_var_features
        )
        if not self.optimisable_rules.rule_lambdas:
            raise RulesNotOptimisedError(
                'There are no optimisable rules in the set'
            )
        # Get performance of original, optimisable rules
        orig_X_rules = self.optimisable_rules.transform(X=X)
        self.orig_rule_performances = dict(
            zip(
                orig_X_rules.columns.tolist(),
                self.metric(orig_X_rules, y, sample_weight)
            )
        )
        if self.verbose > 0:
            print('--- Optimising rules ---')
        return X_min, X_max, orig_X_rules

    def _return_final_rule_set(self,
                               X: PandasDataFrameType,
                               y: PandasSeriesType,
                               sample_weight: PandasSeriesType,
                               opt_rule_strings: Dict[str, str],
                               orig_X_rules: PandasDataFrameType) -> PandasDataFrameType:
        """
        Performs the following before generating the final rule set:

        1. Calculates the performance of the optimised rules.
        2. Compares the performance of the optimised rules to the original
        rules - if the original rule is better performing, it's added to
        the final rule set; else the optimised rule is added.
        3. Any rules that exclusively contain non-optimisable conditions
        are added to the final rule set.

        Returns the binary columns of the final rule set applied to `X`.
        """
        # Get performance of optimised rules
        opt_rules = Rules(rule_strings=opt_rule_strings)
        opt_X_rules = opt_rules.transform(X=X)
        self.opt_rule_performances = dict(
            zip(
                opt_X_rules.columns.tolist(),
                self.metric(opt_X_rules, y, sample_weight)
            )
        )
        # Compare original to optimised rules and return original if better
        # performing
        opt_rule_strings, self.opt_rule_performances, X_rules = self._return_orig_rule_if_better_perf(
            orig_rule_performances=self.orig_rule_performances,
            opt_rule_performances=self.opt_rule_performances,
            orig_rule_strings=self.optimisable_rules.rule_strings,
            opt_rule_strings=opt_rules.rule_strings,
            orig_X_rules=orig_X_rules,
            opt_X_rules=opt_X_rules
        )
        # Combine optimised rules with non-optimised rules (so both can be
        # applied)
        self.rule_strings = {
            **opt_rule_strings, **self.non_optimisable_rules.rule_strings
        }
        # If non-optimisable rules present, apply and combine with `X_rules`
        # (this reduces runtime by not applying the full rule set again)
        if self.non_optimisable_rules.rule_strings:
            X_rules = pd.concat(
                [X_rules, self.non_optimisable_rules.transform(X)], axis=1
            )
        self._generate_other_rule_formats()
        return X_rules

    @staticmethod
    def _calculate_performance_comparison(orig_rule_performances: Dict[str, float],
                                          opt_rule_performances: Dict[str, float]) -> Tuple[PandasDataFrameType, PandasSeriesType]:
        """
        Generates two dataframe - one showing the performance of the original
        rules and the optimised rules, the other showing the difference in
        performance per rule.
        """
        performance_comp = pd.concat([pd.Series(
            orig_rule_performances), pd.Series(opt_rule_performances)], axis=1)
        performance_comp.columns = ['OriginalRule', 'OptimisedRule']
        performance_difference = performance_comp['OptimisedRule'] - \
            performance_comp['OriginalRule']
        return performance_comp, performance_difference

    @staticmethod
    def _return_X_min_max(X: PandasDataFrameType,
                          cols: Set[str]) -> Tuple[PandasSeriesType, PandasSeriesType]:
        """Returns the min and max of columns provided"""
        X_min = X[cols].min()
        X_max = X[cols].max()
        return X_min, X_max

    @staticmethod
    def _return_rules_missing_features(rules: Rules,
                                       columns: List[str],
                                       verbose: int) -> Tuple[List, Set]:
        """
        Returns the names of rules that contain features missing from `X`,
        plus the set of rule features that are present in `X`.
        """
        rule_features = rules.get_rule_features()
        rule_names_missing_features = []
        rule_features_set = set()
        rule_features_items = utils.return_progress_ready_range(
            verbose=verbose, range=rule_features.items())
        for rule_name, feature_set in rule_features_items:
            missing_features = [
                feature for feature in feature_set if feature not in columns]
            [rule_features_set.add(feature)
             for feature in feature_set if feature in columns]
            if missing_features:
                rule_names_missing_features.append(rule_name)
        if rule_names_missing_features:
            warnings.warn(
                message=f'Rules `{"`, `".join(rule_names_missing_features)}` use features that are missing from `X` - unable to optimise or apply these rules',
                category=RulesNotOptimisedWarning
            )
        return rule_names_missing_features, rule_features_set

    @staticmethod
    def _return_all_optimisable_rule_features(lambda_kwargs: Dict[str, Dict[str, float]],
                                              verbose: int) -> List[str]:
        """
        Returns the names of rules that have no optimisable conditions
        (i.e. rules whose lambda_kwargs entry is empty).
        """
        rule_names_no_opt_conditions = []
        lambda_kwargs_items = utils.return_progress_ready_range(
            verbose=verbose, range=lambda_kwargs.items()
        )
        for rule_name, lambda_kwarg in lambda_kwargs_items:
            if lambda_kwarg == {}:
                rule_names_no_opt_conditions.append(rule_name)
        if rule_names_no_opt_conditions:
            warnings.warn(
                message=f'Rules `{"`, `".join(rule_names_no_opt_conditions)}` have no optimisable conditions - unable to optimise these rules',
                category=RulesNotOptimisedWarning
            )
        return rule_names_no_opt_conditions

    @staticmethod
    def _return_rules_with_zero_var_features(rule_features: List[str],
                                             rule_names: List[str],
                                             X_min: PandasSeriesType,
                                             X_max: PandasSeriesType,
                                             rule_names_no_opt_conditions: List[str],
                                             verbose: int) -> List[str]:
        """
        Returns list of rule names that have all zero variance features, so
        cannot be optimised.
        """
        # Get zero var features (including np.nan); NaNs are mapped to a
        # sentinel string first because np.nan != np.nan would hide them
        zero_var_features = X_min.index[
            X_min.replace(np.nan, 'np.nan') == X_max.replace(np.nan, 'np.nan')
        ].tolist()
        # Get rules that exclusively contain zero var features
        rule_names_all_zero_var = []
        rule_names = utils.return_progress_ready_range(
            verbose=verbose, range=rule_names
        )
        for rule_name in rule_names:
            # If rule has no optimisable conditions, skip
            if rule_name in rule_names_no_opt_conditions:
                continue
            rule_is_all_zero_var = all(
                [rule_feature in zero_var_features for rule_feature in rule_features[rule_name]]
            )
            # If all rule features are zero var, add rule to rule_names_all_zero_var
            if rule_is_all_zero_var:
                rule_names_all_zero_var.append(rule_name)
        if rule_names_all_zero_var:
            warnings.warn(
                message=f'Rules `{"`, `".join(rule_names_all_zero_var)}` have all zero variance features based on the dataset `X` - unable to optimise these rules',
                category=RulesNotOptimisedWarning
            )
        return rule_names_all_zero_var

    @staticmethod
    def _return_optimisable_rules(rules: Rules,
                                  rule_names_no_opt_conditions: List[str],
                                  rule_names_zero_var_features: List[str]) -> Tuple[Rules, Rules, Rules]:
        """
        Splits the rule set three ways: `rules` is filtered in place down to
        the optimisable rules, and two copies are filtered to the rules with
        no optimisable conditions and the rules with only zero variance
        features, respectively. Returns (optimisable, non-optimisable,
        zero-variance).
        """
        rule_names_to_exclude = rule_names_no_opt_conditions + rule_names_zero_var_features
        non_optimisable_rules = deepcopy(rules)
        zero_variance_rules = deepcopy(rules)
        rules.filter_rules(exclude=rule_names_to_exclude)
        non_optimisable_rules.filter_rules(
            include=rule_names_no_opt_conditions
        )
        zero_variance_rules.filter_rules(
            include=rule_names_zero_var_features
        )
        return rules, non_optimisable_rules, zero_variance_rules

    @staticmethod
    def _return_orig_rule_if_better_perf(orig_rule_performances: Dict[str, float],
                                         opt_rule_performances: Dict[str, float],
                                         orig_rule_strings: Dict[str, str],
                                         opt_rule_strings: Dict[str, str],
                                         orig_X_rules: PandasDataFrameType,
                                         opt_X_rules: PandasDataFrameType) -> Tuple[Dict[str, str], Dict[str, float], PandasDataFrameType]:
        """
        Overwrites the optimised rule string with the original if the original
        is better performing. Also updates the performance dictionary and the
        binary rule columns with the original if this is the case.
        """
        for rule_name in opt_rule_strings.keys():
            # Ties go to the original rule (>=): avoids churn when the
            # optimiser finds an equally-performing variant
            if orig_rule_performances[rule_name] >= opt_rule_performances[rule_name]:
                opt_rule_strings[rule_name] = orig_rule_strings[rule_name]
                opt_rule_performances[rule_name] = orig_rule_performances[rule_name]
                opt_X_rules[rule_name] = orig_X_rules[rule_name]
        return opt_rule_strings, opt_rule_performances, opt_X_rules

    def _generate_other_rule_formats(self) -> None:
        """Generates other rule formats from `self.rule_strings`"""
        # Generate rule names
        self.rule_names = list(self.rule_strings.keys())
        # Convert generated rules into lambda format. Set rule_lambdas to an
        # empty dict first, prevents errors when running fit more than once.
        self.rule_lambdas = {}
        self.rule_lambdas = self.as_rule_lambdas(
            as_numpy=False, with_kwargs=True
        )
        # Generate rules object
        self.rules = Rules(
            rule_strings=self.rule_strings, rule_lambdas=self.rule_lambdas,
            lambda_kwargs=self.lambda_kwargs
        )
| paypal/Iguanas | iguanas/rule_optimisation/_base_optimiser.py | _base_optimiser.py | py | 23,227 | python | en | code | 73 | github-code | 13 |
18726696152 | import numpy as np
import numpy.random as rand
import matplotlib.pyplot as plt
from numpy.linalg import norm
def derivativetest(fun, x0):
"""
Test the gradient and Hessian of a function. A large proportion
parallel in the middle of both plots means accuraccy.
INPUTS:
fun: a function handle that gives f, g, Hv
x0: starting point
OUTPUTS:
derivative test plots
"""
x0 = x0.reshape(len(x0),1)
fun0 = fun(x0)
dx = rand.randn(len(x0),1)
M = 20;
dxs = np.zeros((M,1))
firsterror = np.zeros((M,1))
order1 = np.zeros((M-1,1))
seconderror = np.zeros((M,1))
order2 = np.zeros((M-1,1))
for i in range(M):
x = x0 + dx
fun1 = fun(x)
H0 = Ax(fun0[2],dx)
firsterror[i] = abs(fun1[0] - (fun0[0] + np.dot(
dx.T, fun0[1])))/abs(fun0[0])
seconderror[i] = abs(fun1[0] - (fun0[0] + np.dot(
dx.T, fun0[1]) + 0.5* np.dot(dx.T, H0)))/abs(fun0[0])
print('First Order Error is %8.2e; Second Order Error is %8.2e'% (
firsterror[i], seconderror[i]))
if i > 0:
order1[i-1] = np.log2(firsterror[i-1]/firsterror[i])
order2[i-1] = np.log2(seconderror[i-1]/seconderror[i])
dxs[i] = norm(dx)
dx = dx/2
step = [2**(-i-1) for i in range(M)]
plt.figure(figsize=(12,8))
plt.subplot(221)
plt.loglog(step, abs(firsterror),'b', label = '1st Order Err')
plt.loglog(step, dxs**2,'r', label = 'order')
plt.gca().invert_xaxis()
plt.legend()
plt.subplot(222)
plt.semilogx(step[1:], order1,'b', label = '1st Order')
plt.gca().invert_xaxis()
plt.legend()
plt.subplot(223)
plt.loglog(step, abs(seconderror),'b', label = '2nd Order Err')
plt.loglog(step, dxs**3,'r', label = 'Order')
plt.gca().invert_xaxis()
plt.legend()
plt.subplot(224)
plt.semilogx(step[1:], order2,'b', label = '2nd Order')
plt.gca().invert_xaxis()
plt.legend()
return plt.show()
def Ax(A, x):
if callable(A):
Ax = A(x)
else:
Ax =A.dot(x)
return Ax | syangliu/Naive-Newton-MR | derivativetest.py | derivativetest.py | py | 2,159 | python | en | code | 4 | github-code | 13 |
43217610073 | from threading import Thread
from PySide6 import QtWidgets
from sweep import Sweep
from tello import Tello
from video import Video
class Button(QtWidgets.QPushButton):
def __init__(self, text, action):
QtWidgets.QPushButton.__init__(self)
self.setText(text)
self.clicked.connect(lambda: Thread(target=action).start())
# button style
self.setFixedSize(200, 40)
self.setStyleSheet('QPushButton {background-color: #30336b; color: white; border-radius: 5px}')
font = self.font()
font.setBold(True)
font.setPointSize(16)
self.setFont(font)
class Control(QtWidgets.QVBoxLayout):
"""List of buttons which send commands to tello drone """
distance = 30 # cm
def __init__(self, tello: Tello):
QtWidgets.QVBoxLayout.__init__(self)
self.tello = tello
self.video = Video(tello)
self.sweep = Sweep(tello)
# buttons
self.addWidget(Button("Take Off", tello.takeoff))
self.addWidget(Button("Land", tello.land))
self.addWidget(Button("Stream Video", self.video.start))
self.addWidget(Button("Perimeter sweep", self.sweep.start))
self.addWidget(Button("Manual override", self.sweep.pause))
self.addWidget(Button("Forward", lambda: tello.move_forward(self.distance)))
self.addWidget(Button("Back", lambda: tello.move_back(self.distance)))
self.addWidget(Button("Left", lambda: tello.move_left(self.distance)))
self.addWidget(Button("Right", lambda: tello.move_right(self.distance)))
# style
self.setSpacing(20)
| rlgo/tello | control.py | control.py | py | 1,627 | python | en | code | 0 | github-code | 13 |
20346791693 | """ This module defines an ObservableProperty class.
An ObservablePropery must be declared as class attribute, similar to standard python properties.
You can bind callables to an ObservableProperty. The callable is called when the property value is set.
Example:
-------
>>> class MyBaseClass:
>>> prop1 = ObservableProperty(21)
>>> prop2 = ObservableProperty(22)
>>>
>>> def __init__(self):
>>> do_something()
>>>
>>> class Observer:
>>> def onProp1Changed(self, value):
>>> print 'prop1=', value
>>> def onProp21Changed(self, value):
>>> print 'prop2=', value
>>>
>>> actor = MyBaseClass()
>>> observer = Observer()
>>> bind(actor, prop1=observer.onProp1Changed, prop2=observer.onProp2Changed)
>>> actor.prop1=42
< prop1= 42
>>> actor.prop2='Hello World'
< prop2= Hello World
"""
import contextlib
import inspect
import weakref
from contextlib import contextmanager
class WeakRef:
""" This Weak Ref implementation allows to hold references to bound methods.
=> see http://stackoverflow.com/questions/599430/why-doesnt-the-weakref-work-on-this-bound-method"""
def __init__(self, item):
self.reference = None
self.method = None
self.instance = None
try:
self.method = item.__func__.__name__
self.instance = weakref.ref(item.__self__)
except AttributeError:
self.reference = weakref.ref(item)
def get_ref(self):
if self.reference is not None:
return self.reference()
instance = self.instance()
if instance is None:
return None
return getattr(instance, self.method)
def __eq__(self, other):
try:
if self.reference is not None:
return self.reference == other.reference
return self.method == other.method and self.instance == other.instance
except AttributeError:
# other is of an unknown class
return False
class _ObservableValue:
    """ Implements the basic mechanism for an observable value. """
    def __init__(self, value, fire_only_on_changed_value=True):
        # current value; observers are notified on every (re)assignment
        self.value = value
        # when True, re-assigning an equal value does not notify anyone
        self._fire_only_on_changed_value = fire_only_on_changed_value
        # mix of WeakRef wrappers (from bind) and plain callables (from strongbind)
        self._observers = []
    def set_value(self, value):
        """Store *value* and synchronously call every registered observer."""
        if value == self.value and self._fire_only_on_changed_value:
            return
        self.value = value
        obsolete_refs = []
        # now call all listeners. Keep track of obsolete weak references
        for ref in self._observers[:]: # make a copy of list, content might change during iteration
            try:
                func = ref.get_ref()
            except AttributeError: # no Weakref instance => strong reference, use ref directly
                func = ref
            if func is None:
                # weakly referenced target has been garbage collected
                obsolete_refs.append(ref)
            else:
                func(self.value) # call func
        # silently drop dead weak references
        for ref in obsolete_refs:
            with contextlib.suppress(ValueError): # e.g. has been deleted by someone else in different thread
                self._observers.remove(ref)
    def bind(self, func):
        """Register *func* through a weak reference (auto-removed when collected)."""
        self._observers.append(WeakRef(func))
    def strongbind(self, func):
        """Register *func* through a strong reference (works for lambdas; must be unbound explicitly)."""
        self._observers.append(func)
    def unbind(self, func):
        """Remove the first registration matching *func* (strong or weak)."""
        func_ref = WeakRef(func)
        for ref in self._observers:
            if ref in (func, func_ref):
                self._observers.remove(ref)
                break
    def unbind_all(self):
        """Drop every registered observer."""
        del self._observers[:]
class ObservableProperty:
    """ stores data in parent obj

    Descriptor that keeps one _ObservableValue per (property, instance) pair.
    The per-instance storage is lazily injected into the owner object as a
    ``_property_instance_data`` dict, so the descriptor itself stays stateless
    apart from its default value.
    """
    def __init__(self, default_value=None, fire_only_on_changed_value=True):
        self._default_value = default_value
        self._fire_only_on_changed_value = fire_only_on_changed_value
    def _get_instance_data(self, obj):
        """Return (creating on first use) the _ObservableValue for *obj*."""
        # see if we already have a _property_instance_data dictionary injected in obj
        # otherwise inject it
        # pylint: disable=protected-access
        try:
            lookup = obj._property_instance_data
        except AttributeError:
            obj._property_instance_data = {}
            lookup = obj._property_instance_data
        # pylint: enable=protected-access
        # see if we already have a data instance for my property instance and class instance
        # otherwise create one
        try:
            return lookup[self]
        except KeyError:
            lookup[self] = _ObservableValue(self._default_value, self._fire_only_on_changed_value)
            return lookup[self]
    def __get__(self, obj, objtype):
        # class-level access returns the descriptor itself; instance access the value
        return self if obj is None else self._get_instance_data(obj).value
    def __set__(self, obj, value):
        if obj is None:
            # NOTE(review): __set__ normally receives an instance; this branch
            # rewrites the shared default value instead — confirm intended.
            self._default_value = value
        else:
            self._get_instance_data(obj).set_value(value)
    def __delete__(self, obj):
        # deletion is deliberately a no-op
        pass
    def bind(self, obj, func):
        """Weakly bind *func* to this property on *obj*."""
        self._get_instance_data(obj).bind(func)
    def strongbind(self, obj, func):
        """Strongly bind *func* to this property on *obj*."""
        self._get_instance_data(obj).strongbind(func)
    def unbind(self, obj, func):
        """Remove a previous (weak or strong) binding of *func* on *obj*."""
        self._get_instance_data(obj).unbind(func)
    def unbind_all(self, obj):
        """Remove every binding of this property on *obj*."""
        self._get_instance_data(obj).unbind_all()
    def __repr__(self):
        return f'ObservableProperty at 0x{id(self):X}, default value={self._default_value}'
def _find_property(obj, name):
""" Helper that looks in class hierarchy for matching member
"""
classes = inspect.getmro(
obj.__class__) # getmro returns a tuple of class base classes, including class, in method resolution order
for cls in classes: # find the first class that has the expected member
try:
return cls.__dict__[name]
except KeyError:
pass
raise KeyError(name) # if no class matches, raise KeyError
def bind(obj, **kwargs):
    """ bind callables with a weak reference.
    Use this bind method for all 'normal' callables like functions or methods.
    The advantage is that the garbage collector can remove objects even if they are referenced by ObservableProperty.
    ObservableProperty silently removes the callable if it no longer exists.
    This method does not work with lambda expressions!
    :param obj: an object with ObservableProperty member(s)
    :param kwargs: name of parameter must match the name of an ObservableProperty, value must be a callable."""
    for name, func in kwargs.items():
        prop = _find_property(obj, name)
        prop.bind(obj, func)
def strongbind(obj, **kwargs):
    """ bind callables with a strong reference.
    This method also works with lambda expressions, but you must unbind the callable before the garbage collector can delete it.
    :param obj: an object with ObservableProperty member(s)
    :param kwargs: name of parameter must match the name of an ObservableProperty, value must be a callable."""
    for name, func in kwargs.items():
        prop = _find_property(obj, name)
        prop.strongbind(obj, func)
def unbind(obj, **kwargs):
    """ unbind callables that were bound before.
    :param obj: an object with ObservableProperty member(s)
    :param kwargs: name of parameter must match the name of an ObservableProperty, value must be a callable.
    Unbinding an unknown callable is allowed, in this cases nothing changes. """
    for name, func in kwargs.items():
        prop = _find_property(obj, name)
        prop.unbind(obj, func)
def unbind_all(obj, *propertyNames):
    """ unbind all callables that were bound before.
    :param obj: an object with ObservableProperty member(s)
    :param propertyNames: list of strings, each string names an ObservableProperty.
    """
    for name in propertyNames:
        prop = _find_property(obj, name)
        prop.unbind_all(obj)
@contextmanager
def bound_context(obj, **kwargs):
    """ context manager for bind / unbind sequence.
    Weakly binds the given callables on entry and unbinds them on exit."""
    bind(obj, **kwargs)
    try:
        yield
    finally:
        unbind(obj, **kwargs)
@contextmanager
def strong_bound_context(obj, **kwargs):
    """ context manager for strongbind / unbind sequence.
    Strongly binds the given callables on entry and unbinds them on exit."""
    strongbind(obj, **kwargs)
    try:
        yield
    finally:
        unbind(obj, **kwargs)
| Draegerwerk/sdc11073 | src/sdc11073/observableproperties/observables.py | observables.py | py | 8,034 | python | en | code | 27 | github-code | 13 |
20686388054 | import pickle
import numpy as np
from data import data
class Model ():
    """Churn classifier wrapper: loads a pickled classifier and feature scaler
    and predicts whether a bank customer will exit ('exite') or stay."""
    def __init__(self) -> None:
        # `with` guarantees the pickle files are closed (the original leaked
        # both handles).  Paths are Windows-relative, as in the original.
        with open("..\\files\\classifier.pkl", "rb") as pickle_in:
            self.clf = pickle.load(pickle_in)
        with open("..\\files\\scaler.pkl", "rb") as pk_sc:
            self.sc = pickle.load(pk_sc)
        # label encodings used during training
        self.country = {'France' : 0, 'Germany' : 1, 'Spain' : 2}
        self.gender = {'Male' : 0, 'Female' : 1}
    def predict_sample(self, data):
        """Predict churn for one customer record.

        *data* must expose a ``dict()`` method returning the raw fields.
        Returns ``{"prediction": "exite"}`` or ``{"prediction": "not exite"}``.
        """
        data = data.dict()
        Tenure = data['Tenure']
        NumOfProducts = data['NumOfProducts']
        IsActiveMember = data['IsActiveMember']
        HasCrCard = data['HasCrCard']
        Geography = self.country[data['Geography']]
        Gender = self.gender[data['Gender']]
        # scale the four continuous features together, in training order
        cols = np.array([data['CreditScore'], data['Balance'],
                         data['EstimatedSalary'], data['Age']])
        scaled = self.sc.transform(cols.reshape(1, -1))
        CreditScore = scaled[0][0]
        Balance = scaled[0][1]
        EstimatedSalary = scaled[0][2]
        # BUG FIX: the original read Age from scaled[0][1] (the Balance slot);
        # Age is the fourth column of `cols`, i.e. index 3.
        Age = scaled[0][3]
        sample = [CreditScore, Geography, Gender, Age, Tenure, Balance,
                  NumOfProducts, HasCrCard, IsActiveMember, EstimatedSalary]
        pred = self.clf.predict([sample])
        p = 'exite' if pred[0] == 1 else 'not exite'
        return {"prediction": p}
| OmarKhaledAbdlhafez/Churn-Classification | deployments/model.py | model.py | py | 1,327 | python | en | code | 0 | github-code | 13 |
34908799846 | '''
反转一个单链表。
示例:
输入: 1->2->3->4->5->NULL
输出: 5->4->3->2->1->NULL
进阶:
你可以迭代或递归地反转链表。你能否用两种方法解决这道题?
'''
# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        # node payload
        self.val = x
        # next node in the list, or None at the tail
        self.next = None
class Solution:
    def reverseList(self, head: ListNode) -> ListNode:
        """Reverse the list iteratively and return the new head.

        Walks the list once, re-pointing each node at the portion already
        reversed.  O(n) time, O(1) extra space.
        """
        reversed_head = None
        current = head
        while current is not None:
            successor = current.next
            current.next = reversed_head
            reversed_head = current
            current = successor
        return reversed_head

    def reverseList_2(self, head: ListNode) -> ListNode:
        """Reverse the list recursively and return the new head.

        The recursion reverses everything after *head*, then hooks *head*
        onto the end of that reversed tail.  O(n) time, O(n) stack.
        """
        if head is None or head.next is None:
            return head
        tail = head.next
        new_head = self.reverseList_2(tail)
        tail.next = head
        head.next = None
        return new_head
# Build the list 1 -> 2 -> 3 -> 4 -> 5.
node1 = ListNode(1)
node2 = ListNode(2)
node3 = ListNode(3)
node4 = ListNode(4)
node5 = ListNode(5)
node1.next = node2
node2.next = node3
node3.next = node4
node4.next = node5
s = Solution()
# NOTE(review): the reversal starts from node5 (the tail).  node5.next is
# None, so reverseList_2 hits its base case and returns node5 unchanged;
# the loop below therefore prints only 5 — confirm node1 was intended.
pointer = s.reverseList_2(node5)
while pointer:
    print(pointer.val)
    pointer = pointer.next
| Yujunw/leetcode_python | 206_反转链表.py | 206_反转链表.py | py | 1,206 | python | en | code | 0 | github-code | 13 |
11364254729 | from turtle import *
import random
import math
class square (Turtle):
    """Turtle rendered as a square of the given size, colour and speed."""
    def __init__(self,height,color,speed):
        Turtle.__init__(self)
        self.shape("square")
        # shapesize(height) scales the default square shape uniformly
        self.shapesize(height)
        self.color(color)
        self.speed(speed)
rec1 = square(7, "red", 1)
rec2 = square(7, "blue", 1)

# Place both squares at random positions FIRST.  The original measured the
# edges before calling goto(), so the collision test always used the spawn
# position (the origin) instead of the actual one.
posx1 = random.randint(0, 100)
posy1 = random.randint(0, 100)
posx2 = random.randint(0, 100)
posy2 = random.randint(0, 100)
rec1.goto(posx1, posy1)
rec2.goto(posx2, posy2)

# Bounding-box edges, computed from the final positions.
x1 = rec1.xcor()
y1 = rec1.ycor()
x2 = rec2.xcor()
y2 = rec2.ycor()
rec1_top = y1 + (rec1.shapesize()[0] / 2)
rec1_right = x1 + (rec1.shapesize()[0] / 2)
rec1_bottom = y1 - (rec1.shapesize()[0] / 2)
rec1_left = x1 - (rec1.shapesize()[0] / 2)
# BUG FIX: rec2_top was computed with a minus sign (identical to
# rec2_bottom), which made rec2's box degenerate in the overlap test.
rec2_top = y2 + (rec2.shapesize()[0] / 2)
rec2_right = x2 + (rec2.shapesize()[0] / 2)
rec2_bottom = y2 - (rec2.shapesize()[0] / 2)
rec2_left = x2 - (rec2.shapesize()[0] / 2)

def check_collision(rec1, rec2):
    """Recolour both squares when their axis-aligned boxes overlap."""
    if (rec1_top >= rec2_bottom and rec1_right >= rec2_left
            and rec1_bottom <= rec2_top and rec1_left <= rec2_right):
        rec1.color("black")
        rec2.color("pink")

check_collision(rec1, rec2)
mainloop() | rahaf19-meet/yl1201718 | lab6/rectangle.py | rectangle.py | py | 1,026 | python | en | code | 0 | github-code | 13 |
71719325457 | import pickle
# ---------- reading helpers ----------
def linea_archivo(arch, default):
    """Return the next line of *arch*, or *default* once the file is exhausted."""
    linea = arch.readline()
    return default if not linea else linea

def leer_usuario(arch):
    """Read one comma-separated user record from *arch*.

    Returns the 5-tuple (id, nombre, fecha, peliculas, estado); past the end
    of the file a sentinel record with id 'end' is produced.
    """
    id, nombre, fecha, peliculas, estado = linea_archivo(arch, "end,0,0,0,0").strip().split(',')
    return id, nombre, fecha, peliculas, estado

# ---------- writing helpers ----------
def grabar_usuario(arch, id, nombre, fecha, peliculas, estado):
    """Append one comma-separated user record (plus newline) to *arch*."""
    arch.write(','.join((id, nombre, fecha, peliculas, estado)) + '\n')
###################################
def grabarError(arch, nombre, dato1, dato2):
    """Log a conflicting duplicate user to *arch*.

    Writes one line ``nombre,<union of movie codes>`` where the movie list is
    the union of the ';'-separated lists *dato1* and *dato2* (order preserved,
    *dato1* first).
    """
    lista = dato1.split(";")
    for campo in dato2.split(";"):
        # BUG FIX: the original tested `campo not in aux` (always False,
        # since campo comes from aux) and called the misspelled
        # `lista.aapend(aux)`, so the two lists were never merged.
        if campo not in lista:
            lista.append(campo)
    arch.write(nombre + ',' + ';'.join(lista) + '\n')
def verificarDatos(nombre1, fecha1, peliculas1, nombre2, fecha2, peliculas2,
                   nombre3, fecha3, peliculas3):
    """Detect duplicate users across the three current merge records.

    When two records share a name but differ in birth date, the conflict is
    logged to long.txt via grabarError.

    NOTE(review): only the first matching pair is checked per call (elif
    chain), and each call rewinds long.txt to position 0 ("r+"), so repeated
    conflicts overwrite each other — confirm intended.
    """
    # `with` closes the handle; the original leaked one handle per call.
    with open("long.txt", "r+") as errores:
        if nombre1 == nombre2:
            if fecha1 != fecha2:
                grabarError(errores, nombre1, peliculas1, peliculas2)
        elif nombre2 == nombre3:
            if fecha2 != fecha3:
                grabarError(errores, nombre2, peliculas2, peliculas3)
        elif nombre3 == nombre1:
            # BUG FIX: this branch pairs records 3 and 1, so the dates to
            # compare are fecha3 and fecha1 (the original compared fecha2).
            if fecha3 != fecha1:
                grabarError(errores, nombre3, peliculas3, peliculas1)
def merge(): #AGREGE ACA
    """Three-way merge of usuarios1/2/3.csv (assumed sorted by id) into
    usuario_maestro.bin, logging duplicate-user conflicts along the way.

    NOTE(review): the three input handles are never closed, and the 'end'
    sentinel takes part in min(); ids whose first letter sorts at or after
    'e' would break the merge order — confirm the id range stays below that.
    """
    user1=open("usuarios1.csv","r")
    user2=open("usuarios2.csv","r")
    user3=open("usuarios3.csv","r")
    # the master file is written as plain text despite the .bin extension
    user_m=open("usuario_maestro.bin","w")
    id1,nombre1,fecha1,peliculas1,estado1 = leer_usuario(user1)
    id2,nombre2,fecha2,peliculas2,estado2 = leer_usuario(user2)
    id3,nombre3,fecha3,peliculas3,estado3 = leer_usuario(user3)
    while id1 != 'end' or id2 != 'end' or id3 != 'end':
        menor = min(id1,id2,id3)
        # log conflicting duplicates (same name, different birth date)
        verificarDatos(nombre1,fecha1,peliculas1,nombre2,fecha2,peliculas2,nombre3,fecha3,peliculas3)
        # advance whichever file(s) currently hold the smallest id
        if id1 == menor:
            grabar_usuario(user_m,id1,nombre1,fecha1,peliculas1,estado1)
            id1,nombre1,fecha1,peliculas1,estado1 = leer_usuario(user1)
        if id2 == menor:
            grabar_usuario(user_m,id2,nombre2,fecha2,peliculas2,estado2)
            id2,nombre2,fecha2,peliculas2,estado2=leer_usuario(user2)
        if id3 == menor:
            grabar_usuario(user_m,id3,nombre3,fecha3,peliculas3,estado3)
            id3,nombre3,fecha3,peliculas3,estado3 = leer_usuario(user3)
    print("Merge Realizado Correctamente")
    enter=input("Enter para continuar ...")
    user_m.close()
    # return to the user sub-menu (menu navigation is recursive in this program)
    sub_menu_user()
############### ALTA DE USUARIO #######################
def validoArchivo(): #AGREGE ACA
    """Open usuario_maestro.bin, creating it if missing.

    Returns (handle, True) when the file already existed, (handle, False)
    when it had to be created.  NOTE(review): callers never close the
    returned handle — confirm whether the leak matters here.
    """
    try:
        arch=open("usuario_maestro.bin","r+")
        valido=True
    except:
        # file does not exist yet: create it empty
        arch=open("usuario_maestro.bin","w")
        valido=False
    return arch,valido
def buscar_id(): #MODIFIQUE
    """Return the next free user id.

    Ids are a letter plus a 3-digit number ('a100'...'a999', then 'b100', ...).
    A brand-new file starts at 'a100'.  NOTE(review): an existing but empty
    file leaves id_anterior unbound and raises NameError — confirm handled
    upstream.
    """
    arch,valido=validoArchivo()
    if valido == False:
        # empty database: start the sequence
        vocal='a'
        num='100'
        nuevo_id=vocal+num
    elif valido == True:
        # scan to the last record and derive the next id from it
        id,nombre,fecha,peliculas,estado = leer_usuario(arch)
        while id != 'end':
            id_anterior=id
            id,nombre,fecha,peliculas,estado = leer_usuario(arch)
        num=int(id_anterior[1:4])
        if num < 999:
            vocal= id_anterior[0]
            num+= 1
            nuevo_id = vocal + str(num)
        elif num == 999:
            # roll over to the next letter, numbering restarts at 100
            vocal= chr(ord(id_anterior[0])+1)
            num=100
            nuevo_id= vocal + str(num)
    return nuevo_id
def alta_user():
    """Interactively append a new user record to usuario_maestro.bin."""
    id=buscar_id()
    arch=open("usuario_maestro.bin","r+")
    # position at end of file before appending
    arch.seek(0,2)
    nombre=input('Nombre y Apellido: ')
    fecha=input('Fecha de nacimiento ddmmaaaa: ')
    print("Su ID es: ",id) #AGREGE
    # new users start with no movies and active ('a') status
    peliculas=' '
    estado = 'a'
    grabar_usuario(arch,id,nombre,fecha,peliculas,estado)
    print("Usuario dado de alta satisfactoriamente.")
    enter=input("Enter para continuar ...")
    arch.close()
    sub_menu_user()
############ BAJA DE USUARIO ###################
def buscar_posicion(buscado):
    """Locate the user named *buscado* in usuario_maestro.bin.

    Returns (pos, id, nombre, fecha, peliculas) where *pos* is the file
    offset of the start of the matching record, so callers can seek back and
    overwrite it in place.  When the name is absent, the sentinel record
    (id 'end') is returned instead of looping forever.
    """
    arch = open("usuario_maestro.bin", "r")
    # BUG FIX: the original only assigned pos inside the loop, leaving it
    # unbound (NameError) when the very first record matched.
    pos = arch.tell()
    id, nombre, fecha, peliculas, estado = leer_usuario(arch)
    # BUG FIX: the original looped forever when *buscado* was not present;
    # stop once the 'end' sentinel is reached.
    while nombre != buscado and id != 'end':
        pos = arch.tell()
        id, nombre, fecha, peliculas, estado = leer_usuario(arch)
    arch.close()
    return pos, id, nombre, fecha, peliculas
def formarLista(): #Agrege
    """Return the list of all user names stored in usuario_maestro.bin."""
    lista=[]
    arch=open("usuario_maestro.bin","r")
    id,nombre,fecha,peliculas,estado = leer_usuario(arch)
    while id != 'end':
        lista.append(nombre)
        id,nombre,fecha,peliculas,estado = leer_usuario(arch)
    arch.close()
    return lista
def baja_user(): #MODIFIQUE
    """Interactively mark a user as deleted (estado 'b') in place.

    The name is validated against formarLista() first, so buscar_posicion
    is only called for names that exist.
    """
    listaNombres=formarLista()
    buscado=input('Ingrese nombre: ')
    if buscado in listaNombres:
        baja= 'b'
        pos,id,nombre,fecha,peliculas=buscar_posicion(buscado)
        arch=open("usuario_maestro.bin","r+")
        # seek to the record's offset and rewrite it with estado 'b'
        arch.seek(pos)
        grabar_usuario(arch,id,nombre,fecha,peliculas,'b')
        print("Usuarios dado de Baja Satisfactoriamente.")
        enter=input("Enter para continuar ...")
        arch.close()
        sub_menu_user()
    else:
        # NOTE(review): this message reads "the name IS in our database";
        # presumably "no se encuentra" was intended — left untouched here.
        print("Nombre se encuentra en nuestra Base de datos.")
        enter=input("Enter para continuar ...")
        baja_user()
############ MOSTRAR LISTADO #################
def mostrar_listado():
    """Print every user record, one pipe-separated line each."""
    arch=open("usuario_maestro.bin","r")
    id,nombre,fecha,peliculas,estado = leer_usuario(arch)
    print(" LISTADO DE USUARIOS.")
    while id != 'end':
        print("{}|{}|{}|{}|{}|".format(id,nombre,fecha,peliculas,estado))
        id,nombre,fecha,peliculas,estado = leer_usuario(arch)
    arch.close()
    enter=input("Enter para continuar ...")
    sub_menu_user()
########## AlTAS DE PELICULAS ###############
def ultimo_id():  # finds the id of the last movie in the file
    """Return the id of the last pickled movie record in archivo_peliculas.bin.

    NOTE(review): with an empty file, lista stays [] and lista[0] raises
    IndexError — confirm the file is never empty when this runs.
    """
    lista=[]
    file=open("archivo_peliculas.bin","rb")
    seguir =True
    while seguir:
        try:
            lista=pickle.load(file)
        except EOFError:
            seguir=False
    ide=lista[0]
    file.close()
    return ide
def genera_id_pelicula():  # generates the id following the last movie's id
    """Derive the next movie id from the last one.

    Movie ids are two letters plus three digits.  NOTE(review): the else
    branch increments the second letter on EVERY call, not only on a 999
    rollover, so ids are not strictly sequential — confirm intended.
    """
    ide=ultimo_id()
    numero=int(ide[2:5])
    numero_ant=numero
    letra=ide[0:2]
    if numero<999 :
        numero+=1
    else:
        numero=0
    numero=str(numero).zfill(3)
    dere=ide[1]
    izqui=ide[0]
    if dere == "z" and numero_ant==999:
        # second letter exhausted: bump the first letter, restart at 'a'
        dere="a"
        letra=chr(ord(izqui)+1)+dere
    else:
        letra=izqui+chr(ord(dere)+1)
    id_pelicula=letra+str(numero).zfill(3)
    return id_pelicula
def pedir_datos():  # interactive movie data entry
    """Prompt for a movie's fields and return the 5-element record list."""
    pelicula=[]
    id_pelicula=genera_id_pelicula()
    titulo=input("Titulo: ")
    director=input("Director: ")
    genero=input("Genero: ")
    puntaje=input("Puntaje: ")
    pelicula=[id_pelicula,titulo,director,genero,puntaje]
    return pelicula
def alta_pelicula():  # appends a movie to an already-created file
    """Append one interactively entered movie record to archivo_peliculas.bin."""
    file = open("archivo_peliculas.bin","rb+")
    # position at end of file before appending the new pickle
    file.seek(0,2)
    lista=pedir_datos()
    pickle.dump(lista,file)
    file.close()
    enter=input("Enter para continuar ...")
    sub_menu_peliculas()
############# BAJA DE PELICULAS ##############
def cargar_al_archivo(dic):
    """Rewrite archivo_peliculas.bin from *dic* (id -> [titulo, director, genero, puntaje])."""
    file=open("archivo_peliculas.bin","wb")
    for clave in dic:
        lista=[clave,dic[clave][0],dic[clave][1],dic[clave][2],dic[clave][3]]
        pickle.dump(lista,file)
    file.close()
def mostrar_peliculas():
    """Print the code and title of every movie in archivo_peliculas.bin."""
    file=open("archivo_peliculas.bin","rb")
    seguir =True  # sentinel flag avoids repeating pickle.load for each record
    print("CODIGO PELICULA")
    while seguir:
        try:
            elem=pickle.load(file)
        except EOFError:
            seguir=False
        else:
            print(elem[0].ljust(10),elem[1],end='\n')
    file.close()
def baja_pelicula():
    """Interactively delete a movie by code, rewriting the whole file.

    NOTE(review): entering an unknown code makes `del dic[...]` raise
    KeyError — confirm this should be guarded.
    """
    dic={}
    file1=open("archivo_peliculas.bin","rb")
    seguir =True
    while seguir:
        try:
            lista=pickle.load(file1)
            dic[lista[0]]=[lista[1],lista[2],lista[3],lista[4]]
        except EOFError:
            seguir=False
    file1.close()
    mostrar_peliculas()
    codigo_pelicula=input("ingrese el codigo de la pelicula a dar de baja: ")
    del dic[codigo_pelicula]
    cargar_al_archivo(dic)
    enter=input("Enter para continuar ...")
    sub_menu_peliculas()
######## MOSTAR PELICULAS POR PUNTAJE #######
def carga_lista():
    """Load every movie as a dict with keys ide/titulo/director/genero/puntaje.

    NOTE(review): the file handle is never closed.
    """
    lista=[]
    file1=open("archivo_peliculas.bin","rb")
    seguir =True
    while seguir:
        try:
            l=pickle.load(file1)
            lista.append({'ide':l[0],'titulo':l[1],'director':l[2],'genero':l[3],'puntaje':l[4]})
        except EOFError:
            seguir=False
    return lista
def mostrar_lista(lista):
    """Print a formatted title/rating/genre table for the given movie dicts."""
    print(" {} {} {}".format('PELICULA'.ljust(22),'PUNTAJE'.ljust(12),'GENERO'))
    for pelicula in lista:
        print(pelicula['titulo'].ljust(25),pelicula['puntaje'].ljust(10),pelicula['genero'].rjust(20))
def pelicula_por_puntaje():
    """List all movies ordered by rating (descending), then genre.

    NOTE(review): puntaje is stored as a string, so the sort is
    lexicographic ('9' > '10') — confirm intended.
    """
    lista=carga_lista()
    lista.sort(key=lambda x:(x['puntaje'],x['genero']), reverse=True)
    mostrar_lista(lista)
    enter=input("Enter para continuar ...")
    sub_menu_peliculas()
############ PROMEDIO DE PUNTAJES DE LAS PELICULAS #####
def cargaLista():  # near-duplicate of carga_lista(), kept because other code depends on the raw-list shape
    """Load every movie record as its raw 5-element list.

    NOTE(review): the file handle is never closed.
    """
    lista=[]
    file1=open("archivo_peliculas.bin","rb")
    seguir =True
    while seguir:
        try:
            l=pickle.load(file1)
            lista.append(l)
        except EOFError:
            seguir=False
    return lista
def leer_archivo_binario(file_film):
    """Unpickle and return the next movie record from *file_film*.

    At end of file a sentinel record with blank text fields and rating 0
    is returned instead of raising.
    """
    try:
        return pickle.load(file_film)
    except EOFError:
        return [" "," "," "," ",0]
def mostrar_lista_cortecontrol():
    """Control-break report over archivo_peliculas_aux.bin.

    The aux file is assumed sorted by (genero, director); prints the average
    rating per director within each genre, then the genre average.
    NOTE(review): total_peliculas is assigned but never used.
    """
    with open("archivo_peliculas_aux.bin","rb") as file_film:
        lista=leer_archivo_binario(file_film)
        director,genero,puntaje=lista[2],lista[3],lista[4]
        total_peliculas=0
        # outer break: one iteration per genre (sentinel genre is ' ')
        while genero != ' ':
            total_genero=0
            contador_genero=0
            genero_anterior=genero
            print("Genero:",genero.upper())
            # middle break: one iteration per director within the genre
            while genero != ' ' and genero == genero_anterior:
                total_puntaje=0
                contador_director=0
                director_anterior=director
                # inner loop: accumulate this director's ratings
                while genero != ' ' and genero == genero_anterior and director_anterior == director:
                    total_puntaje +=int(puntaje)
                    contador_director+=1
                    lista=leer_archivo_binario(file_film)
                    director,genero,puntaje=lista[2],lista[3],lista[4]
                print("promedio de puntaje director: {} es {:.2f}".format(director_anterior,float(total_puntaje/contador_director)))
                total_genero+=total_puntaje
                contador_genero+=contador_director
            print("promedio por {} es {:.2f}".format(genero_anterior,float(total_genero/contador_genero)))
            print(" ")
def carga_lista_archivo(lista):
    """Dump every record of *lista* into archivo_peliculas_aux.bin.

    NOTE(review): lista_aux is created but never used.
    """
    lista_aux=[]
    file=open('archivo_peliculas_aux.bin',"wb")
    for campo in lista:
        pickle.dump(campo,file)
    file.close()
def pelis_por_genero():
    """Print average ratings per genre/director (control-break report)."""
    print("PROMEDIO POR GENERO-DIRECTOR")
    lista=cargaLista()
    # sort by (genero, director) so the control-break pass works
    lista.sort(key=lambda x:(x[3],x[2]))
    carga_lista_archivo(lista)
    mostrar_lista_cortecontrol()
    enter=input("Enter para continuar ...")
    sub_menu_peliculas()
############ASIGNAR PELICULA A USUARIO##############
def verifico(eleccion, pelicula, lista):
    """Check whether movie code *eleccion* is new for the user.

    *pelicula* is the user's current ';'-separated movie string.  When the
    code is new, the extended movie list is appended to *lista* and True is
    returned; otherwise False.
    """
    vistas = pelicula.split(';')
    if eleccion in vistas:
        return False
    vistas.append(eleccion)
    lista.append(vistas)
    return True
def asignar_pelicula(): #AGREGE
    """Interactively add one movie code to a user's record, in place.

    NOTE(review): the record is rewritten at its original offset; a longer
    movie list will overwrite the start of the following record — confirm
    record sizes are handled elsewhere.
    """
    usuario=input("Ingrese nombre de usuario: ")
    pos,id,nombre,fecha,peliculas=buscar_posicion(usuario)
    lista=[]
    seguir = 's'
    mostrar_peliculas()
    # loop until the user picks a movie they do not already have
    while seguir == 's':
        eleccion=input("Ingrese Codigo de Pelicula : ")
        if verifico(eleccion,peliculas,lista):
            enter=input("Pelicula agregada correctamente... enter para continuar.")
            seguir= 'n'
        else:
            print("Pelicula ya esta en su Usuario")
            seguir = 's'
    unir = ";"
    # lista[0] holds the user's extended movie-code list from verifico()
    agregar=unir.join(lista[0])
    arch=open("usuario_maestro.bin","r+")
    arch.seek(pos)
    # estado is forced back to 'a' when rewriting the record
    grabar_usuario(arch,id,nombre,fecha,agregar,'a')
    arch.close()
    sub_menu_peliculas()
################# RECOMENDACIONES##############
def puntoFinal():
    """Return a dict mapping movie code -> movie title from archivo_peliculas.bin.

    NOTE(review): the file handle is never closed.
    """
    dic={}
    arch=open("archivo_peliculas.bin","rb")
    lista=leer_archivo_binario(arch)
    cod,nombrePelicula = lista[0],lista[1]
    # the sentinel record from leer_archivo_binario has cod == " "
    while cod != " ":
        dic[cod] = nombrePelicula
        lista=leer_archivo_binario(arch)
        cod,nombrePelicula = lista[0],lista[1]
    return dic
def convertir(dic):
    """Return a copy of *dic* whose entries are ordered by descending value."""
    por_valor = sorted(dic.items(), key=lambda par: par[1], reverse=True)
    return dict(por_valor)
def peliculasVistas(dic):
    """Count how many times each movie title was watched across all users.

    *dic* maps movie code -> title (from puntoFinal).  Returns a title ->
    count dict ordered by descending count.
    NOTE(review): a user whose movie field is the placeholder ' ' yields
    dic[' '] and raises KeyError — confirm such users cannot reach here.
    The file handle is also never closed.
    """
    aux={}
    arch=open("usuario_maestro.bin","r")
    id,nombre,fecha,peliculas,estado = leer_usuario(arch)
    lista=peliculas.split(";")
    while id !='end':
        for cod in lista:
            nombrePeli =dic[cod]
            if nombrePeli not in aux:
                aux[nombrePeli]= 1
            else:
                aux[nombrePeli] += 1
        id,nombre,fecha,peliculas,estado = leer_usuario(arch)
        lista=peliculas.split(";")
    # reorder by descending view count
    aux=convertir(aux)
    return aux
def recomendarPelis(peliculas, pelisVistas, dic):
    """Recommend a movie based on the user's most recent one.

    *peliculas* is the user's ';'-separated code string; *dic* maps codes to
    titles; *pelisVistas* maps titles to global view counts (most-viewed
    first).  Returns (title, count) for the first movie watched at most as
    often as the user's last movie, or None when nothing qualifies.
    """
    codigo_ultima = peliculas.split(";")[-1]
    tope = pelisVistas[dic[codigo_ultima]]
    for titulo, veces in pelisVistas.items():
        if veces <= tope:
            return titulo, veces
def recomendar_pelicula():
    """Interactively recommend a movie to a user.

    Builds the code->title map and global view counts, looks up the user's
    record, and prints the recommendation from recomendarPelis.
    """
    dic=puntoFinal()
    pelisVistas=peliculasVistas(dic)
    user=input("Ingrese Nomre de Usuario : ")
    pos,id,nombre,fecha,peliculas=buscar_posicion(user)
    nombrePeli,cant= recomendarPelis(peliculas,pelisVistas,dic)
    print("La Recomendacion es ''{}'' que fue vistas {} veces.".format(nombrePeli,cant))
    enter=input("Enter para continuar ....")
    sub_menu_recomendaciones()
################################################
def filtrar_x_genero(genero, lista):
    """Return the movie records in *lista* whose genre field (index 3)
    equals *genero*."""
    return [registro for registro in lista if registro[3] == genero]
def top_5(peli_v, genero):
    """Print up to the first five movies of *peli_v* as 'titulo genero puntaje'.

    *peli_v* is expected to be sorted best-first by the caller.
    """
    print("Top por {}: ".format(genero))
    for indice, pelicula in enumerate(peli_v):
        if indice == 5:
            # BUG FIX: the original compared the record itself to 5
            # (`if pelicula == 5`), which is never true, so lists longer
            # than five were printed in full (and as raw lists).
            return
        print(pelicula[1], pelicula[3], pelicula[4])
def top_x_genero():
    """Ask for a genre and print its top five movies by rating.

    BUG FIX: the original called carga_lista(), which returns dicts, while
    filtrar_x_genero() and top_5() index records positionally (registro[3],
    pelicula[1], ...); cargaLista() returns the raw 5-element lists those
    helpers expect.
    """
    lista = cargaLista()
    genero_ingresado = input("Ingresar genero: ")
    pelis_x_genero = filtrar_x_genero(genero_ingresado, lista)
    # NOTE(review): ratings are strings, so this sort is lexicographic
    # ('9' > '10') — confirm intended.
    pelis_x_genero.sort(key=lambda x: x[4], reverse=True)
    top_5(pelis_x_genero, genero_ingresado)
########### SUB MENU RECOMENDACIONES ########
def sub_menu_recomendaciones():
    """Recommendations sub-menu.

    NOTE(review): menu navigation is recursive throughout this program, so
    the call stack grows with every screen change — confirm acceptable.
    """
    palabra="Sub Menu Recomendaciones."
    print(palabra.center(50,"*"))
    print("1) Recomendar las 5 Peliculas mas vistas por Genero.")
    print("2) Recomendarte Pelicula.")
    print("3) Volver al menu principal")
    opc = input("Opcion: ")
    if opc== '1':
        top_x_genero()
    elif opc == '2':
        recomendar_pelicula()
    elif opc == '3':
        menu_principal()
    else:
        enter=input("Opcion incorrecta .. enter para continuar")
        sub_menu_recomendaciones()
########## SUB MENUS ##################
def sub_menu_peliculas():
    """Movies sub-menu: add/remove/list/assign movies."""
    palabra="Sub Menu Peliculas."
    print(palabra.center(50,"*"))
    print("1) Alta de Pelicula.")
    print("2) Baja de Pelicula.")
    print("3) Peliculas por Puntajes.")
    print("4) Peliculas por Genero.")
    print("5) Asignar Pelicula a Usuarios.")
    print("6) Volver al Menu Principal.")
    opc = input("Opcion: ")
    if opc == '1':
        alta_pelicula()
    elif opc == '2':
        baja_pelicula()
    elif opc == '3':
        pelicula_por_puntaje()
    elif opc == '4':
        pelis_por_genero() #CAMBIE ACA
    elif opc == '5':
        asignar_pelicula()
    elif opc == '6':
        menu_principal()
    else:
        enter=input("Opcion Incorrecta, enter para continuar ...")
        sub_menu_peliculas()
def sub_menu_user():
    """Users sub-menu.

    NOTE(review): opening long.txt with "w" here truncates the conflict log
    on every visit to this menu, and the handle is never closed — confirm
    intended.
    """
    errores=open("long.txt","w")
    palabra="Sub Menu Usuarios."
    print(palabra.center(50,"*"))
    print("1) Generar Archivo.") #CAMBIE
    print("2) Alta de Usuario.")
    print("3) Baja de Usuario.")
    print("4) Listas los Usuarios.")
    print("5) Volver al Menu Principal.")
    opc = input("Opcion: ")
    if opc == '1':
        merge()
    elif opc == '2':
        alta_user()
    elif opc == '3':
        baja_user()
    elif opc == '4':
        mostrar_listado()
    elif opc == '5':
        menu_principal()
    else:
        enter=input("Opcion Incorrecta, enter para continuar ...")
        sub_menu_user()
def menu_principal():
    """Top-level menu.

    NOTE(review): `salir` is assigned but never read; option 4 simply
    returns, unwinding the recursive menu stack.
    """
    salir = False
    palabra=' Bienvenido a NetFlip '
    print(palabra.center(50,"*"))
    print("1) Usuarios.")
    print("2) Peliculas.")
    print("3) Recomendaciones.")
    print("4) Salir.")
    opc = input("Opcion: ")
    if opc == '1':
        sub_menu_user()
    elif opc == '2':
        sub_menu_peliculas()
    elif opc == '3':
        sub_menu_recomendaciones()
    elif opc == '4':
        salir = True
    else:
        enter=input("Opcion Incorrecta, enter para continuar ...")
        menu_principal()
######## BLOQUE PRINCIPAL ############
menu_principal() | CarlosOrqueda/TP2 | TP2 new.py | TP2 new.py | py | 21,707 | python | es | code | 0 | github-code | 13 |
73685205459 | from konlpy.tag import Kkma, Okt, Mecab
from pyspark.sql import SparkSession
from pyspark import SparkConf,SparkContext
from konlpy.utils import pprint
import Restaurant
import TopWordCloud
import re
import os
import threading
import json
import nltk
import traceback
import pymongo
import datetime
def strip_e(st):
    """Return *st* with every astral-plane code point (emoji, etc.) removed."""
    emoji_pattern = re.compile('[\U00010000-\U0010ffff]', flags=re.UNICODE)
    return emoji_pattern.sub('', st)
def tokenize(doc):
    """Return Okt part-of-speech tokens of *doc* as 'token/TAG' strings."""
    # norm=True normalises spelling, stem=True reduces words to their stems
    return ['/'.join(t) for t in Okt().pos(doc, norm=True, stem=True)]
# Module-level cache: feature vocabulary lazily loaded from train_docs.json.
selected_words = []
load_doc = False
def what(doc):
    """Turn a token list into a term-frequency vector over the 10000 most
    common training tokens.

    NOTE(review): when train_docs.json is missing, selected_words stays
    empty and an empty vector is returned — confirm callers handle that.
    """
    global load_doc
    global selected_words
    try:
        if not load_doc:
            if os.path.isfile('train_docs.json'):
                print("존재합니다")
                with open('train_docs.json', encoding="utf-8") as f:
                    train_docs = json.load(f)
                tokens = [t for d in train_docs for t in d[0]]
                print('토큰 수 :', len(tokens))
                text = nltk.Text(tokens, name='NMSC')
                # keep the 10000 most frequent tokens as the feature set
                selected_words = [f[0] for f in text.vocab().most_common(10000)]
                load_doc = True
    except FileNotFoundError:
        traceback.print_exc()
    return [doc.count(word) for word in selected_words]
# Module-level cache for the Keras sentiment model (lazily loaded).
model = None
def sentiment_analysis(rest_list):
    """Score each restaurant comment 0-5 with the sentiment model and store
    the rating in MongoDB via update_rate."""
    global model
    # imported here so the heavy keras/numpy load happens only when needed
    from keras.models import load_model
    import numpy as np
    if model is None:
        model = load_model('./saved_model.h5')
    else:
        pass
    for rest in rest_list:
        # strip emoji and hashtags before tokenising
        token = tokenize(strip_e(rest['comment']).replace("#", " "))
        tf = what(token)
        data = np.expand_dims(np.asarray(tf).astype('float32'), axis=0)
        # model output is presumably a 0-1 probability scaled to 0-5 — TODO confirm
        score = round(float(model.predict(data))*5, 1)
        print(rest['comment'], ':', score, '점')
        update_rate(rest['place_id'], rest['r_name'], rest['name'], score)
def update_rate(place_id, r_name, name, rate):
    """Persist *rate* for the matching rest_instagram document.

    NOTE(review): a new MongoClient is opened per call and never closed.
    """
    conn = pymongo.MongoClient('118.220.3.71', 27017)
    db = conn.crawling
    db.rest_instagram.update({'place_id': place_id, 'r_name': r_name, 'name': name},
                             {'$set': {'rate': rate}})
def update_instagram():
    """Score every Instagram review that does not yet have a valid rating.

    Reviews whose rate is already in (0, 5] are skipped; the rest are
    collected and passed to sentiment_analysis in one batch.
    """
    star = []
    conn = pymongo.MongoClient('118.220.3.71', 27017)
    db = conn.crawling
    reviews = db.rest_instagram.find()
    for review in reviews:
        # skip reviews that already carry a plausible score
        if review['rate'] != 0 and review['rate'] <= 5:
            continue
        s = {
            'place_id': review['place_id'],
            'r_name': review['r_name'],
            'name': review['name'],
            'comment': review['comment'],
        }
        star.append(s)
    sentiment_analysis(star)
if __name__ =='__main__':
update_instagram() | kkw01234/AReaBigDataPython | instagramrate.py | instagramrate.py | py | 2,718 | python | en | code | 0 | github-code | 13 |
14336696667 |
class Link():
    """Simple fighter with hit points, armour, attack power and a weapon.
    All user-facing messages are printed in French."""
    def __init__(self, name, health, armor, power , weapon):
        self.name = name
        self.health = health  # hit points, clamped at 0 when defeated
        self.armor = armor    # absorbs damage before health is touched
        self.power = power    # damage dealt per strike
        self.weapon = weapon
    def print_info(self):
        """Print the fighter's stats."""
        print('', self.name)
        print('PV:', self.health)
        print('armor', self.armor)
        print('power', self.power)
        print('Weapon', self.weapon)
    def strike(self, enemy):
        """Deal self.power damage to *enemy*: armour absorbs first, any
        overflow is taken from health; both are clamped at 0."""
        print(
            '-> ATTAQUE!! ' + self.name + ' ATTAQUE ' + enemy.name + ' avec une force de ' + str(self.power) + ' avec un(e) ' + self.weapon + '\n')
        enemy.armor -= self.power
        if enemy.armor < 0:
            # armour broke: the remainder (negative armor) hits health
            enemy.health += enemy.armor
            enemy.armor = 0
        if enemy.health < 0:
            enemy.health = 0
        print('Vos Pv sont à ' + str(self.health) + '\n' + 'Les Pv de ' + enemy.name + ' decendent à ' + str(enemy.health))
        print(
            enemy.name + 'est afaibli . \n La defence de ' + enemy.name + ' decend à '
            + str(enemy.armor) + ', et les PV de ' + enemy.name + ' decendent à ' +
            str(enemy.health) + '\n')
    def fight(self, enemy):
        """Strike *enemy* repeatedly until one side reaches 0 HP.

        NOTE(review): only `self` ever attacks (the enemy never retaliates),
        and the loop condition relies on the truthiness of self.health
        rather than comparing it to 0 — confirm both are intended.
        """
        print(
            self.name + ' decide de se battre contre ' + enemy.name
        )
        while self.health and enemy.health > 0:
            self.strike(enemy)
        print(
            enemy.name + ' est mort \n' + 'Vos Pv decendent à ' + str(self.health) + ' et Votre defence à '+ str(self.armor) )
        # +'\n' + str(self.power)
| WalkX21/Aprentissage_des_classes | zelda2.py | zelda2.py | py | 1,736 | python | fr | code | 0 | github-code | 13 |
3406102348 | from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from project_dataclasses.Processed_dna_rna import Processed_dna_rna
from project_dataclasses.Processed_protein import Processed_protein
import json
class Sequencer:
    """Static dispatcher that turns raw DNA/RNA/protein records into
    JSON-serialised analysis data using Biopython."""
    def __init__(self):
        # the class is used purely through its staticmethods
        raise Exception(
            "Only a sorcerer can invoke this object. You are not a sorcerer!")
    @staticmethod
    def manage_sequence(data):
        """Dispatch *data* by its seq_type field to the matching handler."""
        if data.seq_type == "dna-seq":
            return Sequencer.manage_dna(data)
        elif data.seq_type == "rna-seq":
            return Sequencer.manage_rna(data)
        elif data.seq_type == "protein-seq":
            return Sequencer.manage_protein(data)
        else:
            raise ValueError(
                "Sequence Type provided by the database is not correct")
    @staticmethod
    def manage_dna(data):
        """Derive complement/transcription/translation views from a DNA record."""
        sequence = Seq(data.sequence, IUPAC.unambiguous_dna)
        treated_data = Processed_dna_rna(
            creation_date=data.creation_date.strftime("%d/%m/%Y, %H:%M:%S"),
            translation_table=data.translation_table,
            coding_dna=str(sequence),
            dna_c=str(sequence.complement()),
            dna_rc=str(sequence.reverse_complement()),
            rna_m=str(sequence.transcribe()),
            rna_m_c=str(sequence.complement().transcribe()),
            protein=str(sequence.translate(
                table=data.translation_table)),
            protein_to_stop=str(sequence.translate(
                table=data.translation_table, to_stop=True))
        )
        return Sequencer.extract_sequence_data(treated_data)
    @staticmethod
    def manage_rna(data):
        """Back-transcribe an RNA record and derive the same views as manage_dna."""
        sequence = Seq(data.sequence, IUPAC.unambiguous_rna)
        treated_data = Processed_dna_rna(
            creation_date=data.creation_date.strftime("%d/%m/%Y, %H:%M:%S"),
            translation_table=data.translation_table,
            coding_dna=str(sequence.back_transcribe()),
            dna_c=str(sequence.back_transcribe().complement()),
            dna_rc=str(sequence.back_transcribe().reverse_complement()),
            rna_m=str(sequence),
            rna_m_c=str(sequence.complement()),
            protein=str(sequence.translate(
                table=data.translation_table)),
            protein_to_stop=str(sequence.translate(
                table=data.translation_table, to_stop=True))
        )
        return Sequencer.extract_sequence_data(treated_data)
    @staticmethod
    def manage_protein(data):
        """Wrap a protein record; truncation at the first '*' gives protein_to_stop.

        NOTE(review): the else branch returns a Seq slice while the if branch
        returns str — the types differ; confirm downstream code copes.
        """
        sequence = Seq(data.sequence, IUPAC.protein)
        treated_data = Processed_protein(
            creation_date=data.creation_date.strftime(
                "%d/%m/%Y, %H:%M:%S UTC"),
            translation_table=data.translation_table,
            protein=str(sequence),
            protein_to_stop=str(
                sequence) if "*" not in sequence else sequence[0:sequence.find("*")]
        )
        return Sequencer.extract_sequence_data(treated_data)
    @staticmethod
    def extract_sequence_data(object):
        """Compute DNA and RNA statistics (GC%, melting temperature, molecular
        weights, nucleotide counts) and return them as three JSON strings:
        (dna_json, rna_json, protein_json).  Each section falls back to null
        when its fields are absent (e.g. protein-only records)."""
        from Bio.SeqUtils import GC, molecular_weight
        from Bio.SeqUtils import MeltingTemp as mt
        try:
            dna_data = {
                'coding_dna': str(object.coding_dna).upper(),
                'dna_c': str(object.dna_c).upper(),
                'dna_rc': str(object.dna_rc).upper(),
                'dna_nucleotide_count': {},
                'gc_count': GC(object.coding_dna),
                'at_count': 100 - GC(object.coding_dna),
                'mt': mt.Tm_NN(object.coding_dna),
                'single_strand_molecular_weight': molecular_weight(object.coding_dna, circular=False, double_stranded=False, seq_type="DNA"),
                'double_strand_molecular_weight': molecular_weight(object.coding_dna, circular=False, double_stranded=True, seq_type="DNA"),
                'circular_single_strand_molecular_weight': molecular_weight(object.coding_dna, circular=True, double_stranded=False, seq_type="DNA"),
                'circular_double_strand_molecular_weight': molecular_weight(object.coding_dna, circular=True, double_stranded=True, seq_type="DNA"),
            }
            for aa in "ATCG":
                dna_data['dna_nucleotide_count'][f"{aa}"] = object.coding_dna.count(
                    aa)
        except Exception as e:
            # record lacks DNA fields (or Biopython failed): emit null
            print(e)
            dna_data = None
        try:
            rna_data = {
                'rna_m': str(object.rna_m).upper(),
                'rna_m_c': str(object.rna_m_c).upper(),
                'rna_nucleotide_count': {},
                'gc_count': GC(object.rna_m),
                'au_count': 100 - GC(object.rna_m),
                'mt': mt.Tm_NN(object.rna_m),
                'single_strand_molecular_weight': molecular_weight(object.rna_m, circular=False, double_stranded=False, seq_type="RNA"),
                'double_strand_molecular_weight': molecular_weight(object.rna_m, circular=False, double_stranded=True, seq_type="RNA"),
                'circular_single_strand_molecular_weight': molecular_weight(object.rna_m, circular=True, double_stranded=False, seq_type="RNA"),
                'circular_double_strand_molecular_weight': molecular_weight(object.rna_m, circular=True, double_stranded=True, seq_type="RNA"),
            }
            for aa in "AUCG":
                rna_data['rna_nucleotide_count'][f"{aa}"] = object.rna_m.count(
                    aa)
        except Exception as e:
            print(e)
            rna_data = None
        return json.dumps(dna_data), json.dumps(rna_data), json.dumps(Sequencer._extract_protein_data(object))
    @staticmethod
    def _extract_protein_data(object):
        """Build the protein section: reading frame 1 always; frames 2 and 3
        only when a coding_dna attribute is present.  Returns a dict or None."""
        try:
            from Bio.SeqUtils import molecular_weight
            from Bio.Seq import Seq
            protein_data = {
                'frame1': str(object.protein).upper(),
                'aa_count': {},
                'molecular_weight_f1': molecular_weight(object.protein.upper().replace("*", ""), seq_type="protein"),
            }
            for aa in "FLSYCWPHQRIMTNKVADEG*":
                protein_data['aa_count'][f"{aa}"] = object.protein.count(aa)
            try:
                # frames 2/3 require the DNA; protein-only records skip this
                new_sequence = Seq(object.coding_dna)
                protein_data['frame2'] = str(new_sequence[1:].translate(
                    object.translation_table))
                protein_data['molecular_weight_f2'] = molecular_weight(
                    protein_data["frame2"].replace("*", ""), seq_type="protein")
                protein_data['frame3'] = str(new_sequence[2:].translate(
                    object.translation_table))
                protein_data['molecular_weight_f3'] = molecular_weight(
                    protein_data["frame3"].replace("*", ""), seq_type="protein")
            except:
                pass
        except Exception as e:
            print(e)
            protein_data = None
        return protein_data
| RodrigoCury/bioinformatic-project | biop/pyHelpers/Sequencer.py | Sequencer.py | py | 7,046 | python | en | code | 0 | github-code | 13 |
36654779416 | import sys
import numpy
import cv2 as cv
import threading
from application.camera import Camera
from application.gui import GUI
from sockets.client import ClientSocket
class App:
# Initializes the App class, setting up the client socket, camera, and GUI
    def __init__(self, client_data):
        """Wire up the client socket, camera capture thread and GUI.

        Order matters: the socket must be set up and started before the
        camera begins streaming frames, and ``gui.display()`` presumably
        blocks until the window closes — TODO confirm.
        """
        # Initialize the client socket; incoming messages go to receive_frame.
        self.client = ClientSocket(client_data=client_data,
                                   on_message=self.receive_frame)
        # Only start capture/GUI when the socket connected successfully.
        if self.client.setup():
            self.client.start()
            # Camera pushes captured frames into handle_frame from its
            # own thread so the GUI loop is not blocked.
            self.camera = Camera(handle_frame=self.handle_frame)
            self.camera_thread = threading.Thread(target=self.camera.capture)
            self.camera_thread.start()
            self.gui = GUI(handle_close=self.close_app)
            self.gui.display()
# Sends the given frame to the client socket
    def handle_frame(self, frame):
        """Forward a locally captured (encoded) camera frame to the server."""
        self.client.send(frame)
# Receives a frame from the client socket and decodes it, updating the GUI with the decoded frame
def receive_frame(self, frame):
new_frame = frame['message']
array_frame = numpy.frombuffer(new_frame, dtype='uint8')
decoded_frame = cv.imdecode(array_frame, 1)
self.gui.current_frame = ({'username': frame['username'], 'frame': decoded_frame})
# Close the app
def close_app(self):
self.client.close_connection()
self.camera.close_camera()
self.gui.close_windows()
sys.exit() | RichiiCD/PyVideoChat | application/app.py | app.py | py | 1,642 | python | en | code | 1 | github-code | 13 |
40424599112 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Libraries dependancies :
#
#
# Haroun dependancies :
#
# Import core concept Intent.
from core.concepts.Intent import Intent
# Import core concept Intent.
from core.concepts.Response import Response
#
#
# Globals :
#
#
#
#
class Interaction(object):
    """Concept of Interaction for Haroun.

    Bundles the triggering stimulus together with the matched intent,
    the response being built, and the skills used to execute it.
    """

    def __init__(self, stimulus):
        """Create an interaction triggered by *stimulus*.

        Parameters
        ----------
        stimulus : Stimulus
            Stimulus at the origin of the interaction.
        """
        # Error flag, raised once an error message has been recorded.
        self.error = False
        # Total interaction duration (filled in later).
        self.duration = None
        # Stimulus that triggered this interaction.
        self.stimulus = stimulus
        # Intent matched for this interaction (set by Recognition).
        self.intent = Intent()
        # Response built while handling the interaction.
        self.response = Response()
        # Skills selected for executing the interaction.
        self.skills = []
        # Interaction state of mind.
        self.mind = None

    def add_response(self, raw_text):
        """Append *raw_text* to the interaction response.

        Parameters
        ----------
        raw_text : str
            Response raw text.
        """
        self.response.add_raw_text(raw_text)

    def add_error(self, error_text):
        """Record *error_text* on the response and flag this interaction.

        Parameters
        ----------
        error_text : str
            Error message.
        """
        self.response.add_error(error_text)
        self.error = True
| LounisBou/haroun | core/concepts/Interaction.py | Interaction.py | py | 2,356 | python | en | code | 0 | github-code | 13 |
23851097539 | # Django settings for frankencode project.
# Initialize App Engine and import the default settings (DB backend, etc.).
# If you want to use a different backend you have to remove all occurences
# of "djangoappengine" from this file.
from djangoappengine.settings_base import *
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Mark Doffman', 'mark.doffman@gmail.com'),
)
SERVER_EMAIL = 'error@frankencode.appspotmail.com'
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
#MEDIA_ROOT = '/Users/mdoff/Projects/frankencode/media/upload'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
#MEDIA_URL = '/site_media/upload/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
#ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to version control; for any shared
# or public deployment it should be rotated and loaded from the
# environment instead of being hard-coded here.
SECRET_KEY = '3!x2dxou#f9)y9+d@$lctqcjsmerey#vqh6$zuafu#if(37@dm'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
#'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
#'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
import os
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'templates').replace('\\','/'),
)
INSTALLED_APPS = (
#'django.contrib.auth',
'djangoappengine',
'djangotoolbox',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'frankfeeds',
'wordpress',
# Uncomment the next line to enable the admin:
#'django.contrib.admin',
)
CACHE_BACKEND = 'memcached://'
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
TEMPLATE_CONTEXT_PROCESSORS += ("context.tweets.latest_tweets", "context.this_site.this_site")
TWITTER_USER = 'doffm'
TWITTER_TWEETS = 3
BLOG_FEED = 'http://doffm.posterous.com/rss.xml'
WP_READ_ONLY = False
#STATIC_DOC_ROOT = '/Users/mdoff/Projects/frankencode/media'
#WP_STATIC_ROOT = "/Users/mdoff/Projects/frankencode/wordpress/media"
SITE_ID = 1  # NOTE(review): duplicate assignment — SITE_ID is already set near the top of this file.
# This test runner captures stdout and associates tracebacks with their
# corresponding output. Helps a lot with print-debugging.
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'
# Activate django-dbindexer if available.
try:
    import dbindexer
    # Route the default connection through dbindexer, keeping the real
    # App Engine backend reachable under the "native" alias.
    DATABASES['native'] = DATABASES['default']
    DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}
    INSTALLED_APPS += ('dbindexer',)
except ImportError:
    # dbindexer is optional; silently fall back to the plain backend.
    pass
| doffm/Frankencode | settings.py | settings.py | py | 4,137 | python | en | code | 1 | github-code | 13 |
10007504813 | import subprocess
class CmdRunner(object):
    """Runs external commands, optionally prepending a fixed prefix."""

    def __init__(self, cmd_prefix=''):
        # Whitespace-separated prefix tokens prepended to every command.
        self._cmd_prefix = cmd_prefix

    def run(self, cmd):
        """Execute *cmd* (a list of argument strings).

        Returns a ``(returncode, stdout)`` tuple on success.  On a
        non-zero exit status, prints the captured output and raises
        ``subprocess.CalledProcessError``.
        """
        full_cmd = self._cmd_prefix.split() + cmd
        child = subprocess.Popen(full_cmd,  # nosec
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        out, err = child.communicate()
        status = child.poll()
        if status != 0:
            details = 'stdout: "%s", stderr: "%s"' % (out, err)
            print(details)
            raise subprocess.CalledProcessError(status, full_cmd, details)
        return status, out
| letterwuyu/cuvette | src/common/cmd_runner.py | cmd_runner.py | py | 645 | python | en | code | 1 | github-code | 13 |
73996307858 | from preprocess import get_dataset, DataLoader, collate_fn_transformer
from network import *
from device import *
from tensorboardX import SummaryWriter
import torchvision.utils as vutils
import os
from tqdm import tqdm
def adjust_learning_rate(optimizer, step_num, warmup_step=4000):
    """Noam-style schedule: linear warm-up, then inverse-sqrt decay.

    Scales ``hp.lr`` and writes the result into every parameter group
    of *optimizer* in place.
    """
    new_lr = hp.lr * warmup_step ** 0.5 * min(
        step_num * warmup_step ** -1.5, step_num ** -0.5)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def main():
    """Train the transformer TTS model, checkpointing every hp.save_step steps."""
    dataset = get_dataset()
    global_step = 0
    # Wrap the model for (multi-)device data parallelism.
    m = nn.DataParallel(Model().to(dev))
    m.train()
    optimizer = t.optim.Adam(m.parameters(), lr=hp.lr)
    # NOTE(review): pos_weight and writer are created but never used in
    # this function — confirm whether stop-token loss / TensorBoard
    # logging were intentionally disabled.
    pos_weight = t.FloatTensor([5.]).to(dev)
    writer = SummaryWriter()
    for epoch in range(hp.epochs):
        dataloader = DataLoader(dataset, batch_size=hp.batch_size, shuffle=True, collate_fn=collate_fn_transformer, drop_last=True, num_workers=16)
        pbar = tqdm(dataloader)
        for i, data in enumerate(pbar):
            pbar.set_description("Processing at epoch %d"%epoch)
            global_step += 1
            # Warm-up/decay schedule is frozen after 400k steps.
            if global_step < 400000:
                adjust_learning_rate(optimizer, global_step)
            character, mel, mel_input, pos_text, pos_mel, _ = data
            # stop_tokens: 1 where pos_mel is padding, 0 elsewhere
            # (unused below — see note above).
            stop_tokens = t.abs(pos_mel.ne(0).type(t.float) - 1)
            character = character.to(dev)
            mel = mel.to(dev)
            mel_input = mel_input.to(dev)
            pos_text = pos_text.to(dev)
            pos_mel = pos_mel.to(dev)
            mel_pred, postnet_pred, attn_probs, stop_preds, attns_enc, attns_dec = m.forward(character, mel_input, pos_text, pos_mel)
            # L1 loss on both the pre-net and post-net mel predictions.
            mel_loss = nn.L1Loss()(mel_pred, mel)
            post_mel_loss = nn.L1Loss()(postnet_pred, mel)
            loss = mel_loss + post_mel_loss
            optimizer.zero_grad()
            # Calculate gradients
            loss.backward()
            # Clip gradient norm to 1.0 for training stability.
            nn.utils.clip_grad_norm_(m.parameters(), 1.)
            # Update weights
            optimizer.step()
            if global_step % hp.save_step == 0:
                t.save({'model':m.state_dict(),
                        'optimizer':optimizer.state_dict()},
                        os.path.join(hp.checkpoint_path,'checkpoint_transformer_%d.pth.tar' % global_step))
if __name__ == '__main__':
main() | hry8310/ai | dl/pytorch-cpu-gpu-TTS/train_transformer.py | train_transformer.py | py | 2,459 | python | en | code | 2 | github-code | 13 |
30642989962 | ## Programa escrito en python orientado a la deteccion y conteo de palabras, signos y espacios en textos ingresados por el usuario
import string
def count_words(text):
    """Return the number of whitespace-separated words in *text*."""
    return len(text.split())
def count_signs(text):
    """Count ASCII punctuation characters in *text*.

    Only the characters in ``string.punctuation`` are counted; non-ASCII
    punctuation (e.g. '¡') is ignored, matching the original behaviour.

    Args:
        text: The string to scan.

    Returns:
        The number of punctuation characters in *text*.
    """
    signs = set(string.punctuation)
    # Idiomatic replacement for the manual counter loop.
    return sum(1 for character in text if character in signs)
def count_spaces(text):
    """Return how many plain space characters (' ') appear in *text*.

    Tabs and newlines are not counted.
    """
    return sum(character == ' ' for character in text)
def main():
    """Prompt the user for a text and report its word, sign and space counts."""
    user_text = input("Ingrese un texto: ")
    n_words = count_words(user_text)
    n_signs = count_signs(user_text)
    n_spaces = count_spaces(user_text)
    summary = "El texto tiene {} palabras, {} signos y {} espacios.".format(
        n_words, n_signs, n_spaces)
    print(summary)
if __name__ == "__main__":
main() | ArochaDeveloper/libreria-en-python | libreria.py | libreria.py | py | 1,272 | python | es | code | 0 | github-code | 13 |
37402475765 | import pandas as pd
from PIL import Image
import streamlit as st
from streamlit_drawable_canvas import st_canvas
import glob
import numpy as np
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
import argparse
import sys
from matplotlib import pyplot as plt
import matplotlib
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
#---STREAMLIT INTERFACE
st.markdown("<h1 style='text-align: center; color: black;'>Where is Wally?</h1>", unsafe_allow_html=True)
#Add instructions
with st.expander('Help! This is my first time', expanded=False):
st.write("The green box indicates where the algorithm located Wally. If you agree, you can move on to the next image. If you disagree, you can correct the location by placing a square around Wally's face. Click and drag the mouse to make a square.")
#---SIDEBAR
#realtime_update = st.checkbox("Update in realtime", True)
#--ADD SOMETHING HERE TO GET THE IMAGE+GUESS FROM THE MODEL
#---GET IMAGE
imgs = glob.glob("images/*.jpg") #just the base images right now
# Save model path
model_path = './trained_model/frozen_inference_graph.pb'
#plots in columns:
left_column_upper, mid_column_upper,midr_column_upper, right_column_upper = st.columns(4)
#Create functions
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.compat.v1.GraphDef()
with tf.compat.v2.io.gfile.GFile(model_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
def load_image_into_numpy_array(image):
    """Convert a PIL RGB image into an (height, width, 3) uint8 array."""
    width, height = image.size
    flat_pixels = numpy.array(image.getdata())
    return flat_pixels.reshape((height, width, 3)).astype(numpy.uint8)
#Save variables
label_map = label_map_util.load_labelmap('./trained_model/labels.txt')
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=1, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
#Initialise index
if 'index' not in st.session_state:
st.session_state.index = 0
#Create save button
with right_column_upper:
save = st.button('Next image', help='Saving the image and loading the next. You can always go back and make corrections.')
if save:
st.session_state.index += 1
with left_column_upper:
previous = st.button('Previous image', help='Go back to the previous image')
if previous:
st.session_state.index -= 1
with detection_graph.as_default():
with tf.compat.v1.Session(graph=detection_graph) as sess:
parser = argparse.ArgumentParser()
#parser.add_argument('image_path')
#args = parser.parse_args()
image_np = load_image_into_numpy_array(Image.open(imgs[st.session_state.index]))
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: np.expand_dims(image_np, axis=0)})
#print(scores[0][0])
if scores[0][0] < 0.1:
sys.exit('Wally not found :(')
#print('Wally found')
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=5)
plt.figure(figsize=(12, 8))
plt.imshow(image_np)
plt.show()
plt.savefig(f'{imgs[st.session_state.index]}.png')
current_score = round(100 * float(scores[0][0]), 1)
if current_score > 90:
st.subheader(f'The algorithm is {current_score} % certain!✅')
elif current_score < 90 and current_score > 30:
st.subheader(f'The algorithm is {current_score} % certain!🤔')
else:
st.subheader(f'The algorithm is {current_score} % certain! â›”')
#Create a canvas component
canvas_result = st_canvas(
fill_color="rgba(255, 165, 0, 0.3)", # Fixed fill color with some opacity
background_image=Image.open(f'{imgs[st.session_state.index]}.png') if f'{imgs[st.session_state.index]}.png' else None,
update_streamlit=False,
height=500,
width=750,
drawing_mode='rect',
point_display_radius= 0,
key="canvas",
stroke_width= 2,
)
# Do something interesting with the image data and paths
#if canvas_result.image_data is not None:
# st.image(canvas_result.image_data)
#if canvas_result.json_data is not None:
# objects = pd.json_normalize(canvas_result.json_data["objects"]) # need to convert obj to str because PyArrow
# for col in objects.select_dtypes(include=['object']).columns:
# objects[col] = objects[col].astype("str")
# st.dataframe(objects)
#Create function to save positions of drawing
def save_corrected_data(dataframe):
    """Persist the user-corrected annotation coordinates as CSV.

    NOTE(review): the output path is hard-coded and assumes a ``data/``
    directory exists relative to the working directory — confirm.
    """
    dataframe.to_csv('data/hybrid_data.csv')
left_column_lower, mid_column_lower, right_column_lower = st.columns(3)
with mid_column_lower:
    if st.button('Export hybrid intelligent data', help='Exports the updated coordinates as a .csv and stores it in the data folder'):
        # BUG FIX: `objects` was only built in the commented-out block
        # above, so clicking the button raised NameError.  Rebuild the
        # annotation table from the canvas state before exporting.
        if canvas_result.json_data is not None:
            objects = pd.json_normalize(canvas_result.json_data["objects"])
            # Convert object-dtype columns to str so the CSV is serializable.
            for col in objects.select_dtypes(include=['object']).columns:
                objects[col] = objects[col].astype("str")
            save_corrected_data(objects)
#Add instructions
with st.expander('Learn more about the design', expanded=False):
st.image('flowchart.png')
st.markdown("<h1 style='text-align: center; color: black;'>HOW OUR SOLUTION RELATES TO THE FOUR AKATA CRITERIA</h1>", unsafe_allow_html=True)
text = '''
\n**Collaborative**
\n Our collaborative solution combines the strengths of:
\n- _the AI (the neural network):_ quickly identify the point in a big image which is most likely to contain Wally + report certainty
\n- _the human_: quickly and intuitively check doubt cases, and correct the AI where it’s wrong. Human task is smaller since the big hurdle of scanning a full detailed image has been outsourced to the computer, and the human can focus on doubt cases.
This human-computer collaboration can quickly and accurately locate Wally in a large selection of images.
\n**Adaptive**
\n Our solution could be extended to adapt to the user input.
\n - _Synergy_: the feedback from the human (where was Wally actually?) could ideally be fed back into the network (as a ‘true label’) to make it perform better over time!
Potentially, it could also utilize transfer learning and adapt to find other targets - for example Wanda - by using user input!
\n**Responsible**
\nLegal and moral values aren’t really relevant for this specific case, and no ethics are involved.
\n**Explainable**
\n Our solution could be expanded to be more explainable. Ideally, the NN should be able to say WHY it thinks Wally is in a particular place.
For example by highlighting the pixels of the guess that makes it most confident that this is Wally.
e.g. maybe it’s the striped shirt, and not the face.
'''
st.write(text)
| KiriKoppelgaard/StudyGroupIdaTheaKiri | Where_Is_Wally_exercise/Wally_interface.py | Wally_interface.py | py | 7,220 | python | en | code | 1 | github-code | 13 |
2433766873 | #!/usr/bin/env python3
"""
.. automodule:: phile.launcher.cmd
.. automodule:: phile.launcher.defaults
----------------------------------
For starting and stopping services
----------------------------------
"""
# Standard libraries.
import asyncio
import collections
import collections.abc
import contextlib
import dataclasses
import enum
import functools
import itertools
import logging
import types
import typing
# Internal modules.
import phile
import phile.asyncio
import phile.asyncio.pubsub
import phile.builtins
import phile.capability
_KeyT = typing.TypeVar("_KeyT")
_T = typing.TypeVar("_T")
_ValueT = typing.TypeVar("_ValueT")
Awaitable = collections.abc.Awaitable[typing.Any]
NullaryAsyncCallable = collections.abc.Callable[[], Awaitable]
NullaryCallable = collections.abc.Callable[[], typing.Any]
Command = NullaryAsyncCallable
CommandLines = list[Command]
# TODO[mypy issue #1422]: __loader__ not defined
_loader_name: str = __loader__.name # type: ignore[name-defined]
_logger = logging.getLogger(_loader_name)
class Type(enum.IntEnum):
    """Determines how a :class:`Descriptor` is started and stopped.

    The value selects which coroutine becomes the launcher's main
    subroutine and which condition marks the launcher as "started",
    i.e. ready to serve dependents that requested to start.  When the
    main subroutine returns — after being cancelled or otherwise — the
    launcher counts as stopped.
    """

    SIMPLE = enum.auto()
    """
    Started as soon as its :attr:`~Descriptor.exec_start` coroutine is
    scheduled in the event loop.  The coroutine need not have been given
    control yet; in practice, because of implementation detail, it is
    always given at least one chance to run.
    """

    EXEC = enum.auto()
    """
    Started once its :attr:`~Descriptor.exec_start` coroutine has run
    and yielded control back to the event loop.
    """

    FORKING = enum.auto()
    """
    Started when its :attr:`~Descriptor.exec_start` coroutine returns.
    The coroutine must return an :class:`asyncio.Future`, which is then
    identified as the launcher's main subroutine.
    """

    CAPABILITY = enum.auto()
    """
    Started when its ``capability_name`` is set in the capability
    registry.
    """
class Descriptor(typing.TypedDict, total=False):
    """Static configuration for one launcher entry.

    All keys are optional at the type level, but :meth:`Database.add`
    rejects descriptors without ``exec_start``.
    """
    after: set[str]  # start ordering: wait for these before starting
    before: set[str]  # start ordering: start ahead of these
    binds_to: set[str]  # hard dependencies started alongside this entry
    capability_name: str  # capability a Type.CAPABILITY launcher must set
    conflicts: set[str]  # entries stopped whenever this entry starts
    default_dependencies: bool  # auto-add phile_shutdown.target ordering/conflict
    exec_start: CommandLines  # coroutines called to start the launcher
    exec_stop: CommandLines  # coroutines called to stop the launcher
    type: Type  # start/stop semantics (see Type)
class CapabilityNotSet(RuntimeError):
    """Raised when a Type.CAPABILITY launcher finishes starting without
    having set its declared ``capability_name``."""
    pass
class MissingDescriptorData(KeyError):
    """Raised when a Descriptor lacks required data (e.g. ``exec_start``)."""
    pass
class NameInUse(RuntimeError):
    """Raised when adding a launcher under a name that is already taken."""
    pass
class OneToManyTwoWayDict(dict[_KeyT, set[_ValueT]]):
    """A dict from key to set-of-values that also maintains the inverse
    mapping (value -> set of keys) under :attr:`inverses`.

    NOTE(review): items passed to the constructor go through
    ``dict.__init__`` and bypass ``__setitem__``, so ``_inverses`` is
    not populated for them — confirm whether that is intended.
    """
    def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
        # Inverse map: value -> set of keys whose value-set contains it.
        self._inverses = collections.defaultdict[_ValueT, set[_KeyT]](
            set
        )
        super().__init__(*args, **kwargs)
    def __setitem__(self, key: _KeyT, new_values: set[_ValueT]) -> None:
        # Replace semantics: drop the old value-set (and its inverse
        # entries) before recording the new one.
        if key in self:
            self.__delitem__(key)
        super().__setitem__(key, new_values)
        try:
            inverses = self._inverses
            for value in new_values:
                inverses[value].add(key)
        # Defensive. Not sure how to force this.
        # It should not happen in normal usage.
        except: # pragma: no cover
            self.__delitem__(key)
            raise
    def __delitem__(self, key: _KeyT) -> None:
        try:
            inverses = self._inverses
            existing_values = self[key]
            for value in existing_values:
                inverse_set = inverses.get(value)
                # Defensive. Not sure how to force this.
                # It should not happen in normal usage.
                if inverse_set is not None: # pragma: no branch
                    inverse_set.discard(key)
                    if not inverse_set:
                        # Drop empty inverse sets to keep lookups clean.
                        inverses.pop(value, None)
                del inverse_set
        finally:
            super().__delitem__(key)
    @property
    def inverses(self) -> types.MappingProxyType[_ValueT, set[_KeyT]]:
        # Read-only view of the inverse mapping.
        # NOTE(review): the underlying mapping is a defaultdict, so
        # subscripting the proxy with a missing value would still insert
        # an empty set; callers here use ``.get`` which avoids that.
        return types.MappingProxyType(self._inverses)
    @property
    def pop(self) -> None: # type: ignore[override]
        # pylint: disable=invalid-overridden-method
        """Use ``del`` instead. Inverse bookeeping is done there."""
        raise AttributeError(
            "'OneToManyTwoWayDict' object has no attribute 'pop'"
        )
class Database:
    """Parsed store of launcher descriptors.

    Each option of a :class:`Descriptor` is copied into a dedicated
    per-option table keyed by launcher name, with defaults filled in,
    so lookups never have to consult the raw descriptor again.
    """
    def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
        # TODO[mypy issue 4001]: Remove type ignore.
        super().__init__(*args, **kwargs) # type: ignore[call-arg]
        self.known_descriptors: dict[str, Descriptor] = {}
        """
        The launcher names added, with given :class:`Descriptor`.
        The descriptors added are stored as given
        for debugging purposes only.
        In particular, if the descriptors given are subsequently changed,
        the stored description would be changed as well,
        but associated launcher behaviour would not be.
        (That may be achieved by removing and adding again,
        but that would likely stop all dependents.)
        Therefore, the data is parsed when adding as necessary,
        and should not be relied upon afterwards.
        """
        self.after = OneToManyTwoWayDict[str, str]()
        """Order dependencies of launchers."""
        self.before = OneToManyTwoWayDict[str, str]()
        """Order dependencies of launchers."""
        self.binds_to = OneToManyTwoWayDict[str, str]()
        """Dependencies of launchers."""
        self.capability_name: dict[str, str] = {}
        """Capabilities registered by launcher."""
        self.conflicts = OneToManyTwoWayDict[str, str]()
        """Conflicts between launchers."""
        self.default_dependencies: dict[str, bool] = {}
        self.exec_start: dict[str, CommandLines] = {}
        """Coroutines to call to start a launcher."""
        self.exec_stop: dict[str, CommandLines] = {}
        """Coroutines to call to stop a launcher."""
        self.remover: dict[str, NullaryCallable] = {}
        """Callback to call to remove launchers from the database."""
        self.type: dict[str, Type] = {}
        """Initialisation and termination conditions of launchers."""
    def add(self, entry_name: str, descriptor: Descriptor) -> None:
        """Parse *descriptor* and register it under *entry_name*.

        Not thread-safe.  Raises :class:`NameInUse` or
        :class:`MissingDescriptorData` on invalid input.  All per-option
        table insertions are staged on an ExitStack so a failure rolls
        back every insertion made so far.
        """
        self._check_new_descriptor(entry_name, descriptor)
        known_descriptors = self.known_descriptors
        # Not sure why Pylint thinks phile.builtins is a dict.
        provide_item = (
            phile.builtins.provide_item # pylint: disable=no-member
        )
        with contextlib.ExitStack() as stack:
            stack.enter_context(
                provide_item(known_descriptors, entry_name, descriptor)
            )
            # Copies one descriptor option (or its default) into the
            # matching per-option table, registering the undo on `stack`.
            def provide_option(option_name: str, default: _T) -> None:
                stack.enter_context(
                    provide_item(
                        getattr(self, option_name),
                        entry_name,
                        descriptor.get(option_name, default),
                    )
                )
            provide_option("after", set[str]())
            provide_option("before", set[str]())
            provide_option("binds_to", set[str]())
            provide_option("capability_name", "")
            provide_option("conflicts", set[str]())
            provide_option("default_dependencies", True)
            provide_option("exec_start", None)
            provide_option("exec_stop", [])
            # A non-empty capability_name implies CAPABILITY semantics
            # unless the descriptor says otherwise.
            default_type = Type.SIMPLE
            if self.capability_name[entry_name]:
                default_type = Type.CAPABILITY
            provide_option("type", default_type)
            del default_type
            # Default dependencies tie every launcher to the shutdown
            # target: start before it and stop when it starts.
            if self.default_dependencies[entry_name]:
                before = self.before[entry_name].copy()
                before.add("phile_shutdown.target")
                self.before[entry_name] = before
                conflicts = self.conflicts[entry_name].copy()
                conflicts.add("phile_shutdown.target")
                self.conflicts[entry_name] = conflicts
            # Success: detach the rollback stack and keep its __exit__
            # as the removal callback for this entry.
            self.remover[entry_name] = functools.partial(
                stack.pop_all().__exit__, None, None, None
            )
    def _check_new_descriptor(
        self, entry_name: str, descriptor: Descriptor
    ) -> None:
        """Validate that *entry_name* is free and *descriptor* is usable."""
        if entry_name in self.known_descriptors:
            raise NameInUse(
                "Launchers cannot be added with the same name."
                " The following given name is already in use:"
                " {entry_name}".format(entry_name=entry_name)
            )
        if "exec_start" not in descriptor:
            raise MissingDescriptorData(
                "A launcher.Descriptor must provide"
                " a exec_start coroutine to be added."
                " It is missing from the unit named"
                " {entry_name}".format(entry_name=entry_name)
            )
    def remove(self, entry_name: str) -> None:
        """Remove *entry_name* from every table; a no-op if unknown."""
        entry_remover = self.remover.pop(entry_name, None)
        if entry_remover is not None:
            entry_remover()
    def contains(self, entry_name: str) -> bool:
        """Return whether *entry_name* is currently registered."""
        return entry_name in self.remover
class EventType(enum.Enum):
    """Kinds of launcher lifecycle events published by the registry."""

    START = enum.auto()
    STOP = enum.auto()
    ADD = enum.auto()
    REMOVE = enum.auto()
@dataclasses.dataclass
class Event:
    """A launcher lifecycle notification published on Registry.event_queue."""
    type: EventType  # what happened (START/STOP/ADD/REMOVE)
    entry_name: str  # name of the launcher the event refers to
class Registry:
    def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
        """Set up empty task bookkeeping and load default launchers."""
        # TODO[mypy issue 4001]: Remove type ignore.
        super().__init__(*args, **kwargs) # type: ignore[call-arg]
        self._capability_registry = phile.capability.Registry()
        self._database = Database()
        # Tasks wrapping each running launcher's main subroutine.
        self._running_tasks: dict[str, asyncio.Future[typing.Any]] = {}
        # In-flight start requests, keyed by launcher name.
        self._start_tasks: dict[str, asyncio.Task[typing.Any]] = {}
        # In-flight stop requests, keyed by launcher name.
        self._stop_tasks: dict[str, asyncio.Task[typing.Any]] = {}
        self.event_queue = phile.asyncio.pubsub.Queue[Event]()
        # NOTE(review): add_default_launchers is defined outside this
        # chunk (likely phile.launcher.defaults) — confirm.
        self.add_default_launchers()
    @property
    def capability_registry(self) -> phile.capability.Registry:
        """Capability registry watched by Type.CAPABILITY launchers."""
        return self._capability_registry
    @property
    def database(self) -> Database:
        """Launcher descriptor database backing this registry."""
        return self._database
    @property
    def state_machine(self) -> "Registry":
        """Return self; presumably a backward-compatibility alias — confirm."""
        return self
    async def add(self, entry_name: str, descriptor: Descriptor) -> None:
        """Register a launcher; async wrapper around :meth:`add_nowait`."""
        # Symmetric counterpart of `remove`.
        self.add_nowait(entry_name=entry_name, descriptor=descriptor)
    def add_nowait(
        self, entry_name: str, descriptor: Descriptor
    ) -> None:
        """Register a launcher in the database and publish an ADD event."""
        self._database.add(entry_name=entry_name, descriptor=descriptor)
        self.event_queue.put(
            Event(type=EventType.ADD, entry_name=entry_name)
        )
    async def remove(self, entry_name: str) -> None:
        """Stop the launcher (waiting for it) and then unregister it."""
        await self.stop(entry_name=entry_name)
        self.remove_nowait(entry_name=entry_name)
    def remove_nowait(self, entry_name: str) -> None:
        """Unregister a launcher and publish a REMOVE event.

        No-op for unknown names; raises RuntimeError if still running.
        """
        if not self._database.contains(entry_name):
            return
        # NOTE(review): is_running is not defined in this chunk — confirm
        # its semantics elsewhere in the module.
        if self.is_running(entry_name=entry_name):
            raise RuntimeError("Cannot remove a running launcher.")
        self._database.remove(entry_name=entry_name)
        self.event_queue.put(
            Event(type=EventType.REMOVE, entry_name=entry_name)
        )
    def contains(self, entry_name: str) -> bool:
        """Return whether a launcher named *entry_name* is registered."""
        return self._database.contains(entry_name)
    def start(
        self,
        entry_name: str,
    ) -> asyncio.Task[typing.Any]:
        """Return the task starting *entry_name*, creating it if needed.

        Start requests are de-duplicated: concurrent calls share the
        same task, which removes itself from the table on completion.
        """
        start_tasks = self._start_tasks
        try:
            entry_start_task = start_tasks[entry_name]
        except KeyError:
            entry_start_task = start_tasks[
                entry_name
            ] = asyncio.create_task(self._do_start(entry_name))
            entry_start_task.add_done_callback(
                functools.partial(start_tasks.pop, entry_name)
            )
        return entry_start_task
    def stop(
        self,
        entry_name: str,
    ) -> asyncio.Task[typing.Any]:
        """Return the task stopping *entry_name*, creating it if needed.

        Mirrors :meth:`start`: concurrent stop requests share one task.
        """
        stop_tasks = self._stop_tasks
        try:
            entry_stop_task = stop_tasks[entry_name]
        except KeyError:
            entry_stop_task = stop_tasks[
                entry_name
            ] = asyncio.create_task(self._do_stop(entry_name))
            entry_stop_task.add_done_callback(
                functools.partial(stop_tasks.pop, entry_name)
            )
        return entry_stop_task
    async def _do_start(self, entry_name: str) -> None:
        """Start *entry_name*: resolve conflicts/dependencies, run its
        main task, and publish START now / STOP when it finishes."""
        # If a launcher is started while it is being stopped,
        # assume a restart-like behaviour is desired.
        # So wait till the launcher has stopped before starting.
        entry_stop_task = self._stop_tasks.get(entry_name)
        if entry_stop_task is not None:
            await entry_stop_task
        running_tasks = self._running_tasks
        if entry_name in running_tasks:
            return
        # NOTE(review): this rebinding repeats the line above; harmless.
        running_tasks = self._running_tasks
        await self._ensure_ready_to_start(entry_name)
        _logger.debug("Launcher %s is starting.", entry_name)
        main_task = await self._start_main_task(entry_name)
        _logger.debug("Launcher %s has started.", entry_name)
        # The runner wraps the main task and performs shutdown clean-up;
        # it represents the launcher's "running" state.
        running_tasks[entry_name] = runner_task = asyncio.create_task(
            self._clean_up_on_stop(entry_name, main_task)
        )
        runner_task.add_done_callback(
            functools.partial(running_tasks.pop, entry_name)
        )
        self.event_queue.put(
            Event(type=EventType.START, entry_name=entry_name)
        )
        # Publish STOP as soon as the runner finishes, however it ends.
        runner_task.add_done_callback(
            lambda _task: (
                self.event_queue.put(
                    Event(type=EventType.STOP, entry_name=entry_name)
                )
            )
        )
    async def _do_stop(self, entry_name: str) -> None:
        """Cancel any in-flight start, then cancel the running task."""
        entry_start_task = self._start_tasks.get(entry_name)
        if entry_start_task is not None:
            await phile.asyncio.cancel_and_wait(entry_start_task)
        entry_running_task = self._running_tasks.get(entry_name)
        if entry_running_task is not None:
            await phile.asyncio.cancel_and_wait(entry_running_task)
    async def _clean_up_on_stop(
        self, entry_name: str, main_task: asyncio.Future[typing.Any]
    ) -> None:
        """Wait for the launcher's main task, then run its shutdown
        sequence: stop dependents, run exec_stop, cancel the main task.

        The nested try/finally chain guarantees each stage runs even if
        an earlier one raises or this coroutine itself is cancelled.
        """
        try:
            # A forced cancellation from this function
            # should not happen until dependents are processed
            # and a graceful shutdown is attempted.
            await asyncio.shield(main_task)
        finally:
            try:
                await self._ensure_ready_to_stop(entry_name)
            finally:
                try:
                    _logger.debug("Launcher %s is stopping.", entry_name)
                    await self._run_command_lines(
                        self._database.exec_stop[entry_name]
                    )
                finally:
                    # TODO(BoniLindsley): What to do with exceptions?
                    await phile.asyncio.cancel_and_wait(main_task)
                    _logger.debug("Launcher %s has stopped.", entry_name)
    async def _ensure_ready_to_start(self, entry_name: str) -> None:
        """Stop conflicts, start bound dependencies, and wait for the
        ordering constraints of *entry_name* to be satisfied."""
        database = self._database
        stop = self.stop
        _logger.debug("Launcher %s is stopping conflicts.", entry_name)
        # Conflicts are symmetric: both directions of the table apply.
        for conflict in database.conflicts.get(
            entry_name, set()
        ) | database.conflicts.inverses.get(entry_name, set()):
            stop(conflict)
        _logger.debug(
            "Launcher %s is starting dependencies.", entry_name
        )
        start = self.start
        for dependency in database.binds_to[entry_name]:
            start(dependency)
        _logger.debug(
            "Launcher %s is waiting on dependencies.", entry_name
        )
        # `after` combines explicit after-edges with inverse before-edges;
        # `before` likewise, so ordering declared on either side counts.
        after = database.after.get(
            entry_name, set()
        ) | database.before.inverses.get(entry_name, set())
        before = database.before.get(
            entry_name, set()
        ) | database.after.inverses.get(entry_name, set())
        # Wait for: stops of anything ordered relative to this entry,
        # and starts of everything this entry must come after.
        pending_tasks = set(
            filter(
                None,
                itertools.chain(
                    map(self._stop_tasks.get, after | before),
                    map(self._start_tasks.get, after),
                ),
            )
        )
        if pending_tasks:
            await asyncio.wait(pending_tasks)
async def _ensure_ready_to_stop(self, entry_name: str) -> None:
database = self._database
_logger.debug("Launcher %s is stopping dependents.", entry_name)
for dependent in database.binds_to.inverses.get(
entry_name, set()
):
self.stop(dependent)
before = database.before.get(
entry_name, set()
) | database.after.inverses.get(entry_name, set())
pending_tasks = set(
filter(None, map(self._stop_tasks.get, before))
)
if pending_tasks:
_logger.debug(
"Launcher %s is waiting on %s dependent(s).",
entry_name,
len(pending_tasks),
)
await asyncio.wait(pending_tasks)
async def _start_main_task(
self, entry_name: str
) -> asyncio.Future[typing.Any]:
database = self._database
main_task: asyncio.Future[typing.Any] = asyncio.create_task(
self._run_command_lines(database.exec_start[entry_name])
)
try:
unit_type = database.type[entry_name]
if unit_type is Type.EXEC:
await asyncio.sleep(0)
elif unit_type is Type.FORKING:
main_task = await main_task
assert isinstance(main_task, asyncio.Future)
elif unit_type is Type.CAPABILITY:
expected_capability_name = database.capability_name[
entry_name
]
# TODO(BoniLindsley): Stop on capability unregistering.
# Need to use the same event_view as here
# to not miss any events.
event_view = (
self.capability_registry.event_queue.__aiter__()
)
capabilities_set: list[str] = []
async for event in event_view: # pragma: no branch
if event.type is not (
phile.capability.EventType.SET
):
continue
capability_name = (
event.capability.__module__
+ "."
+ event.capability.__qualname__
)
capabilities_set.append(capability_name)
if capability_name == expected_capability_name:
break
if not capabilities_set or (
capabilities_set[-1] != expected_capability_name
):
raise CapabilityNotSet(
"Launcher {} did not set capability {}.\n"
"Only detected the following: {}".format(
entry_name,
expected_capability_name,
capabilities_set,
)
)
return main_task
except:
await phile.asyncio.cancel_and_wait(main_task)
raise
async def _run_command_lines(
self, command_lines: CommandLines
) -> typing.Any:
"""Await the given command lines and return the last result."""
return_value: typing.Any = None
for command in command_lines:
return_value = await command()
return return_value
def is_running(self, entry_name: str) -> bool:
return entry_name in self._running_tasks
def add_default_launchers(self) -> None:
self.add_nowait(
"phile_shutdown.target",
phile.launcher.Descriptor(
default_dependencies=False,
exec_start=[asyncio.get_event_loop().create_future],
),
)
| BoniLindsley/phile | src/phile/launcher/__init__.py | __init__.py | py | 21,024 | python | en | code | 0 | github-code | 13 |
74562970258 | #!/usr/bin/env python
"""
_Express_t_
Express job splitting test
"""
import unittest
import threading
import logging
import time
from WMCore.WMBS.File import File
from WMCore.WMBS.Fileset import Fileset
from WMCore.WMBS.Subscription import Subscription
from WMCore.WMBS.Workflow import Workflow
from WMCore.DataStructs.Run import Run
from WMCore.DAOFactory import DAOFactory
from WMCore.JobSplitting.SplitterFactory import SplitterFactory
from WMCore.Services.UUIDLib import makeUUID
from WMQuality.TestInit import TestInit
class ExpressTest(unittest.TestCase):
    """
    _ExpressTest_

    Test for Express job splitter
    """

    def setUp(self):
        """
        _setUp_

        Create the schema, one site, a run with two lumis, an Express
        stream and a subscription using the Express split algorithm.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer", "T0.WMBS"])

        self.splitterFactory = SplitterFactory(package = "T0.JobSplitting")

        myThread = threading.currentThread()
        daoFactory = DAOFactory(package = "T0.WMBS",
                                logger = logging,
                                dbinterface = myThread.dbi)

        myThread.dbi.processData("""INSERT INTO wmbs_location
                                    (id, site_name, state, state_time)
                                    VALUES (1, 'SomeSite', 1, 1)
                                    """, transaction = False)
        myThread.dbi.processData("""INSERT INTO wmbs_pnns
                                    (id, pnn)
                                    VALUES (2, 'SomePNN')
                                    """, transaction = False)
        myThread.dbi.processData("""INSERT INTO wmbs_location_pnns
                                    (location, pnn)
                                    VALUES (1, 2)
                                    """, transaction = False)

        insertRunDAO = daoFactory(classname = "RunConfig.InsertRun")
        insertRunDAO.execute(binds = { 'RUN' : 1,
                                       'HLTKEY' : "someHLTKey" },
                             transaction = False)

        insertLumiDAO = daoFactory(classname = "RunConfig.InsertLumiSection")
        for lumi in [1, 2]:
            insertLumiDAO.execute(binds = { 'RUN' : 1,
                                            'LUMI' : lumi },
                                  transaction = False)

        insertStreamDAO = daoFactory(classname = "RunConfig.InsertStream")
        insertStreamDAO.execute(binds = { 'STREAM' : "Express" },
                                transaction = False)

        insertStreamFilesetDAO = daoFactory(classname = "RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "Express", "TestFileset1")

        self.fileset1 = Fileset(name = "TestFileset1")
        self.fileset1.load()

        workflow1 = Workflow(spec = "spec.xml", owner = "hufnagel", name = "TestWorkflow1", task="Test")
        workflow1.create()

        self.subscription1 = Subscription(fileset = self.fileset1,
                                          workflow = workflow1,
                                          split_algo = "Express",
                                          type = "Express")
        self.subscription1.create()

        # keep for later
        self.insertClosedLumiDAO = daoFactory(classname = "RunLumiCloseout.InsertClosedLumi")
        self.releaseExpressDAO = daoFactory(classname = "Tier0Feeder.ReleaseExpress")
        self.currentTime = int(time.time())

        return

    def tearDown(self):
        """
        _tearDown_

        Drop all tables created for the test.
        """
        self.testInit.clearDatabase()
        return

    def populateFilesAndLumis(self, lumis, filecount, closeTime):
        """
        _populateFilesAndLumis_

        Create filecount files for each lumi, add them to the test fileset
        (committing it) and return the closed-lumi binds, using closeTime
        as their CLOSE_TIME, ready for the InsertClosedLumi DAO.

        This consolidates the fixture code previously duplicated across
        test00, test01 and test02.
        """
        insertClosedLumiBinds = []
        for lumi in lumis:
            for i in range(filecount):
                newFile = File(makeUUID(), size = 1000, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset1.addFile(newFile)
            insertClosedLumiBinds.append( { 'RUN' : 1,
                                            'LUMI' : lumi,
                                            'STREAM' : "Express",
                                            'FILECOUNT' : filecount,
                                            'INSERT_TIME' : self.currentTime,
                                            'CLOSE_TIME' : closeTime } )
        self.fileset1.commit()
        return insertClosedLumiBinds

    def finalCloseLumis(self):
        """
        _finalCloseLumis_

        Mark every closed lumi section as finally closed.
        """
        myThread = threading.currentThread()

        myThread.dbi.processData("""UPDATE lumi_section_closed
                                    SET close_time = 1
                                    """, transaction = False)

        return

    def getNumActiveSplitLumis(self):
        """
        _getNumActiveSplitLumis_

        helper function that counts the number of active split lumis
        """
        myThread = threading.currentThread()

        results = myThread.dbi.processData("""SELECT COUNT(*)
                                              FROM lumi_section_split_active
                                              """, transaction = False)[0].fetchall()

        return results[0][0]

    def test00(self):
        """
        _test00_

        Test that the job name prefix feature works
        Test event threshold (single job creation)
        Test that only closed lumis are used
        Test check on express release
        """
        insertClosedLumiBinds = self.populateFilesAndLumis(lumis = [1], filecount = 2,
                                                           closeTime = 0)

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription1)

        # no closed-lumi information in the database yet => no jobs
        jobGroups = jobFactory(maxInputEvents = 200, maxInputRate = 23000)
        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        self.insertClosedLumiDAO.execute(binds = insertClosedLumiBinds,
                                         transaction = False)

        # lumis known but not finally closed yet => still no jobs
        jobGroups = jobFactory(maxInputEvents = 200, maxInputRate = 23000)
        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        self.finalCloseLumis()

        # express not released yet => still no jobs
        jobGroups = jobFactory(maxInputEvents = 200, maxInputRate = 23000)
        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        self.releaseExpressDAO.execute(binds = { 'RUN' : 1 }, transaction = False)

        # everything released and closed => exactly one job
        jobGroups = jobFactory(maxInputEvents = 200, maxInputRate = 23000)
        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")
        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertTrue(job['name'].startswith("Express-"),
                        "ERROR: Job has wrong name")

        self.assertEqual(self.getNumActiveSplitLumis(), 0,
                         "ERROR: Split lumis were created")

        return

    def test01(self):
        """
        _test01_

        Test event threshold (multiple job creation)
        """
        insertClosedLumiBinds = self.populateFilesAndLumis(lumis = [1], filecount = 2,
                                                           closeTime = self.currentTime)

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription1)

        self.insertClosedLumiDAO.execute(binds = insertClosedLumiBinds,
                                         transaction = False)

        self.releaseExpressDAO.execute(binds = { 'RUN' : 1 }, transaction = False)

        # 200 events in the lumi with a 199-event threshold => two jobs
        jobGroups = jobFactory(maxInputEvents = 199, maxInputRate = 23000)
        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        self.assertEqual(self.getNumActiveSplitLumis(), 1,
                         "ERROR: Didn't create a single split lumi")

        return

    def test02(self):
        """
        _test02_

        Test multi lumis
        """
        insertClosedLumiBinds = self.populateFilesAndLumis(lumis = [1, 2], filecount = 1,
                                                           closeTime = self.currentTime)

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription1)

        self.insertClosedLumiDAO.execute(binds = insertClosedLumiBinds,
                                         transaction = False)

        self.releaseExpressDAO.execute(binds = { 'RUN' : 1 }, transaction = False)

        # one job per lumi at this threshold => two jobs, no split lumis
        jobGroups = jobFactory(maxInputEvents = 100, maxInputRate = 23000)
        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        self.assertEqual(self.getNumActiveSplitLumis(), 0,
                         "ERROR: Split lumis were created")

        return
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| dmwm/T0 | test/python/T0_t/WMBS_t/JobSplitting_t/Express_t.py | Express_t.py | py | 10,553 | python | en | code | 6 | github-code | 13 |
40905840218 | import numpy as np
from flask import Flask
from flask import jsonify
from flask import request # request可以获取请求参数
from flask import render_template # 使用模板返回页面
import random
import dataGet
# Flask application object for this mock monitoring/data service.
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Health-check endpoint returning a fixed greeting."""
    greeting = 'hello world!'
    return greeting
@app.route('/tem')
def my_tem():
    """Render the demo index template."""
    template_name = "index.html"
    return render_template(template_name)
@app.route('/login')
def my_login():
    """Echo the submitted credentials back (demo endpoint only)."""
    credentials = {key: request.values.get(key) for key in ('name', 'passwd')}
    return 'name={name}, passwd={passwd}'.format(**credentials)
@app.route('/abc')
def my_abc():
    """Render a small login form pre-filled with the ``id`` query parameter.

    Security fix: the parameter comes from the client and was interpolated
    into the HTML unescaped, allowing cross-site scripting (XSS).  It is now
    HTML-escaped (including quotes, since the value sits inside a quoted
    attribute) before interpolation.
    """
    from html import escape  # stdlib; local import, used only here
    # str() keeps the original rendering of a missing parameter as "None".
    user_id = escape(str(request.values.get('id')), quote=True)
    return f'''
    <form action='/login'>
        账号:<input name='name' value='{user_id}'><br>
        密码:<input name='passwd'>
        <input type='submit'>
    </form>
    '''
@app.route('/equipmentList', methods=["GET"])
def get_equipment_list():
    """Return the stored equipment list with a fresh timestamp."""
    info = dataGet.open_cfg_dat_as_obj("equipmentList.json")
    payload = {
        "code": 0,
        "status": 200,
        "result": info["result"],
        "timestamp": int(dataGet.get_time()),
    }
    return jsonify(payload)
@app.route('/equipmentSetting', methods=["POST", "DELETE"])
def equipment_setting():
    """Echo the ``reqSave`` form field back as the operation result."""
    req = request.values.get("reqSave")
    dataGet.debug_print(req)
    return jsonify({
        "code": 0,
        "status": 200,
        "result": req,
        "timestamp": int(dataGet.get_time()),
    })
@app.route('/acquisitionSetting', methods=["POST"])
def acquisition_setting():
    """Accept acquisition parameters (Rr, SR, SNR) and echo them back."""
    settings = {
        "Rr": request.values.get("Rr"),
        "SR": request.values.get("SR"),
        "SNR": request.values.get("SNR"),
    }
    for value in settings.values():
        dataGet.debug_print(value)
    return jsonify({
        "code": 0,
        "status": 200,
        "result": [settings],
        "timestamp": int(dataGet.get_time()),
    })
@app.route('/alarmSetting', methods=["POST"])
def alarm_setting():
    """Accept alarm thresholds (down, up, db) and echo them back."""
    thresholds = {
        "down": request.values.get("down"),
        "up": request.values.get("up"),
        "db": request.values.get("db"),
    }
    for value in thresholds.values():
        dataGet.debug_print(value)
    return jsonify({
        "code": 0,
        "status": 200,
        "result": [thresholds],
        "timestamp": int(dataGet.get_time()),
    })
@app.route('/alarmNum', methods=["GET"])
def alarm_num():
    """Return a random alarm count between 1 and 20 (mock data)."""
    return jsonify({
        "code": 0,
        "status": 200,
        "result": [{"num": random.randint(1, 20)}],
        "timestamp": int(dataGet.get_time()),
    })
@app.route('/getTime', methods=["GET"])
def get_time():
    """Return the server's current time, formatted by dataGet."""
    formatted = dataGet.get_time_format()
    return jsonify({
        "code": 0,
        "status": 200,
        "result": [{"time": formatted}],
        "timestamp": int(dataGet.get_time()),
    })
@app.route('/alarmRecord', methods=["GET"])
def alarm_record():
    """Return stored alarm records, refreshing the timestamp."""
    record = dataGet.open_cfg_dat_as_obj("alarmRecord.json")
    record["timestamp"] = int(dataGet.get_time())
    return jsonify(record)
@app.route('/showWave', methods=["GET"])
def show_wave():
    """Return a randomly generated time-domain waveform (mock data)."""
    frequency = random.randint(1, 10) * 0.001
    xs, ys = dataGet.gen_audio_wave(frequency)
    return jsonify({
        "code": 0,
        "status": 200,
        "result": [{"timeDomainDataY": ys, "timeDomainDataX": xs}],
        "timestamp": int(dataGet.get_time()),
    })
@app.route('/waveData', methods=["GET"])
def wave_data():
    """Return mock time/frequency/time-frequency series for one equipment.

    All series are randomly generated.  The time-frequency Y axis is a fixed
    0..29 ramp and Z is one random 0..253 intensity row, each repeated once
    per X sample -- matching the original behaviour, where the manual
    append loops repeated references to a single row.

    Idiom cleanup: replaced the manual ``append`` loops with comprehensions
    and list repetition, and removed commented-out debug prints.
    """
    equipment = request.values.get("equipment")
    dataGet.debug_print(equipment)
    frequency = random.randint(1, 10) * 0.001
    time_x, time_y = dataGet.gen_audio_wave(frequency)
    freq_x, freq_y = dataGet.gen_audio_wave(frequency)
    # The generated Y series of the time-frequency wave is discarded;
    # the original overwrote it with the fixed ramp below.
    tf_x, _ = dataGet.gen_audio_wave(frequency)
    rows = len(tf_x)
    axis_row = [float(i) for i in range(30)]
    intensity_row = [float(i) for i in np.random.randint(254, size=30)]
    result = {
        "timeDomainDataY": time_y,
        "timeDomainDataX": time_x,
        "frequencyDomainDataY": freq_y,
        "frequencyDomainDataX": freq_x,
        "timeFrequencyDataY": [axis_row] * rows,
        "timeFrequencyDataX": tf_x,
        "timeFrequencyDataZ": [intensity_row] * rows,
    }
    return jsonify({
        "code": 0,
        "status": 200,
        "result": [result],
        "timestamp": int(dataGet.get_time()),
    })
@app.route('/pieChart', methods=["GET"])
def pie_chart():
    """Return fixed alarm-distribution statistics (mock data)."""
    stats = {
        "normal": [57, 0.67],
        "warning": [15, 0.18],
        "alarm": [10, 0.12],
        "important": [3, 0.03],
        "critical": [0, 0],
        "duration": [3, 15, 25, 30],
    }
    return jsonify({
        "code": 0,
        "status": 200,
        "result": [stats],
        "timestamp": int(dataGet.get_time()),
    })
if __name__ == '__main__':
    # Development server; Flask defaults to localhost:5000.
    app.run()
    # app.run(host="0.0.0.0", port=80)  # use this line when deploying to a server
| StuRuby/python-starter | main.py | main.py | py | 5,613 | python | en | code | 1 | github-code | 13 |
30999406802 | import pathlib
from setuptools import setup
# Resolve paths relative to this setup.py so builds work from any directory.
CURRENT_PATH = pathlib.Path(__file__).parent
# The PyPI long description is taken directly from the project README.
README = (CURRENT_PATH/"README.md").read_text()
setup(
    name="derive_event_pm4py",
    version="1.0.1",
    description="It derives new events based on rules provided as inputs.",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/ajayp10/derive_event_pm4py",
    author="Ajay Pandi",
    author_email="ajay.pandi@rwth-aachen.de",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    packages=["derive_event"],
    include_package_data=True,
    install_requires=['pandas', 'numpy', 'pm4py',
                      ],
    entry_points={
        "console_scripts": [
            # Installs a ``derive`` console command bound to derive_event.derive:main.
            "derive=derive_event.derive:main",
        ]
    },
) | ajayp10/derive_event_pm4py | setup.py | setup.py | py | 907 | python | en | code | 0 | github-code | 13
10041084189 | import pygame
import random
class ParticlePrinciple:
    """A simple particle system drawn as shrinking circles.

    Each particle is stored as ``[[x, y], radius, [dx, dy]]``.
    """

    # Custom pygame event used by callers to drive particle spawning.
    PARTICLE_EVENT = pygame.USEREVENT + 1

    def __init__(self):
        self.particles = []

    def emit(self, screen: pygame.display, color):
        """Advance, prune and draw all live particles onto *screen*.

        Bug fix: the original advanced ``position[1]`` (y) by the component
        labelled "x" and vice versa.  Both components are drawn from the same
        symmetric random distribution, so the visual result was identical,
        but the indices now match their labels.
        """
        if self.particles:
            self.delete_particles()
        for particle in self.particles:
            position, _, direction = particle
            position[0] += direction[0]  # x coordinate
            position[1] += direction[1]  # y coordinate
            particle[1] -= 0.2  # shrink the particle each frame
            pygame.draw.circle(screen, color, position, int(particle[1]))

    def add_particles(self, emitting_obj: pygame.Rect):
        """Spawn one particle at the centre of *emitting_obj*."""
        pos_x = emitting_obj.x + emitting_obj.size[0] / 2
        pos_y = emitting_obj.y + emitting_obj.size[1] / 2
        radius = 10
        direction = [random.uniform(-1, 1), random.uniform(-1, 1)]
        self.particles.append([[pos_x, pos_y], radius, direction])

    def delete_particles(self):
        """Drop particles whose radius reached zero, then re-arm the timer."""
        self.particles = [particle for particle in self.particles if particle[1] > 0]
        # NOTE: re-arming the spawn timer here (rather than once at startup)
        # is a pre-existing quirk; kept for backward-compatible behaviour.
        pygame.time.set_timer(ParticlePrinciple.PARTICLE_EVENT, 30)
class Entity(pygame.Rect):
    """A drawable rectangular entity with a text label and a particle trail."""

    def __init__(self,
                 scr: pygame.display,
                 spawn_x: float, spawn_y: float,
                 width: float, height: float, color,
                 label: str = '',
                 l_font: pygame.font.Font = None):
        """Create the entity at (spawn_x, spawn_y) with the given size/colour.

        Bug fix: ``l_font`` previously defaulted to
        ``pygame.font.Font('freesansbold.ttf', 13)`` in the signature.  A
        default argument is evaluated once at import time, which requires
        ``pygame.font`` to be initialised before this module is imported and
        shares a single Font object between all instances.  The default is
        now created lazily per instance; callers passing a font see no change.
        """
        super().__init__(spawn_x, spawn_y, width, height)
        self.screen = scr
        self.name = label
        self.color = color
        self.speed = 0
        self.dx = 0
        self.dy = 0
        self.particles = ParticlePrinciple()
        self.font = l_font if l_font is not None else pygame.font.Font('freesansbold.ttf', 13)
        self.visual_label = self.font.render(self.name, True, self.color)

    def show(self):
        """Draw the particle trail, the label and the entity rectangle."""
        self.particles.emit(self.screen, self.color)
        self.screen.blit(self.visual_label, (self.x - 12, self.y - 20))
        pygame.draw.rect(self.screen, self.color, self)

    def set_font(self, l_font: pygame.font.Font):
        """Replace the label font and re-render the label surface."""
        self.font = l_font
        self.visual_label = self.font.render(self.name, True, self.color)

    def add_particles(self):
        """Spawn a new particle at this entity's current position."""
        self.particles.add_particles(self)

    def __str__(self):
        return f'{self.name}: speed={self.speed}, size=({self.size[0]}, {self.size[1]}), color={self.color}'
| DimYfantidis/Mimaras_Movement_Simulator | Classes.py | Classes.py | py | 2,525 | python | en | code | 0 | github-code | 13 |
7592635426 | from unittest import TestCase
from summary.core import TokensSpace
class TokenSpaceTest(TestCase):
    """Unit tests for summary.core.TokensSpace.

    Fix: ``assertEquals`` is a deprecated alias of ``assertEqual`` and was
    removed in Python 3.12; both usages are updated.
    """

    def test_shouldComputeDocumentSpaceAsToken2IdMapping(self):
        """The space maps each distinct token to an id and a frequency."""
        document_tokens = ["quick", "brown", "fox", "jump", "lazy", "dog", "quick", "brown", "fox", "jump", "lazy",
                           "cat"]
        expected_token_2_id_map = {
            "brown": 0,
            "cat": 1,
            "dog": 2,
            "fox": 3,
            "jump": 4,
            "lazy": 5,
            "quick": 6,
        }
        expected_token_2_frequency_map = {
            "brown": 2,
            "cat": 1,
            "dog": 1,
            "fox": 2,
            "jump": 2,
            "lazy": 2,
            "quick": 2,
        }
        space = TokensSpace()
        actual_token_maps = space.compute_token_2_id_mapping(document_tokens)
        self.assertEqual(actual_token_maps, (expected_token_2_frequency_map, expected_token_2_id_map))

    def test_shouldCreateVectorsFromTokens(self):
        """vectorize() counts sentence-token frequencies in id order."""
        document_tokens = ["quick", "brown", "fox", "jump", "lazy", "dog", "quick", "brown", "fox", "jump", "lazy",
                           "cat"]
        document_space = TokensSpace(tokens=document_tokens)
        expected_vector = [2, 0, 0, 2, 0, 0, 2]
        sentence_tokens = ["quick", "brown", "fox"]
        actual_vector = document_space.vectorize(sentence_tokens)
        self.assertEqual(actual_vector, expected_vector)
| rajasoun/nlp | nlp_framework/tests/summary/core/document_space_test.py | document_space_test.py | py | 1,433 | python | en | code | 0 | github-code | 13 |
24508046650 | # https://finance.naver.com/sise/ 에 요청을 보내서 응답을 받아온다.
# Send a GET request to Naver Finance's market overview page.
import requests
import bs4
url = "https://finance.naver.com/sise/"
response = requests.get(url)
# NOTE(review): no status/encoding check here; consider
# response.raise_for_status() before parsing.
# print(response.text)
# Parse response.text so Python can work with it; use the html.parser backend.
html = bs4.BeautifulSoup(response.text, "html.parser")
# print(html)
# Grab the element holding the current KOSPI index value and print it.
kospi = html.select_one('#KOSPI_now')
print(kospi.text)
| khs123456/TIL | day1/kospi.py | kospi.py | py | 464 | python | ko | code | 0 | github-code | 13 |
1047687779 | """test_people
Tests for people controller
created 28-oct-2019 by richb@instantlinux.net
"""
import pytest
from unittest import mock
import test_base
class TestPeople(test_base.TestBase):
    """Tests for the people controller.

    Cleanup: local variables named ``id`` shadowed the builtin and were
    renamed to ``uid`` (matching the naming already used by the later
    tests); one unused local was removed.
    """

    def setUp(self):
        self.authorize()

    @pytest.mark.slow
    def test_add_and_fetch_person(self):
        """Create a person and verify both the person and contact records."""
        record = dict(name='Teddy Johnson', identity='tj@conclave.events')
        expected = dict(
            lists=[], status='active', privacy='public',
            referrer_id=self.test_uid, rbac='dru', **record)
        contact = dict(count=1, items=[dict(
            info=record['identity'], label='home', privacy='member',
            type='email', muted=False, rank=1, modified=None, carrier=None,
            owner=record['name'], status='unconfirmed')])
        response = self.call_endpoint('/person', 'post', data=record)
        self.assertEqual(response.status_code, 201)
        uid = response.get_json()['id']
        response = self.call_endpoint('/person/%s' % uid, 'get')
        result = response.get_json()
        del result['created']
        expected['id'] = uid
        self.assertEqual(result, expected)

        response = self.call_endpoint('/contact?filter={"info":"%s"}' %
                                      record['identity'], 'get')
        result = response.get_json()
        contact['items'][0]['uid'] = uid
        del result['items'][0]['id']
        del result['items'][0]['created']
        # TODO validate rbac after it's fixed
        del result['items'][0]['rbac']
        self.assertEqual(result, contact)

    def test_update_person(self):
        """Update a person's privacy and verify the stored record."""
        record = dict(name='Sarah Lee', identity='sarah@conclave.events')
        updated = dict(name='Sarah Lee', privacy='invitee')
        expected = dict(
            lists=[], status='active', privacy='public',
            referrer_id=self.test_uid, rbac='dru', **record)
        response = self.call_endpoint('/person', 'post', data=record)
        self.assertEqual(response.status_code, 201)
        uid = response.get_json()['id']
        response = self.call_endpoint('/person/%s' % uid, 'put', data=dict(
            record, **updated))
        self.assertEqual(response.status_code, 200, 'put failed message=%s' %
                         response.get_json().get('message'))
        response = self.call_endpoint('/person/%s' % uid, 'get')
        result = response.get_json()
        del result['created']
        del result['modified']
        expected.update(updated)
        expected['id'] = uid
        self.assertEqual(result, expected)

    def test_person_delete(self):
        """Soft-delete disables the record; force delete removes it."""
        record = dict(name='President Number45',
                      identity='occupant@whitehouse.gov')
        expected = dict(
            status='disabled', privacy='public', lists=[],
            referrer_id=self.test_uid, rbac='dru', **record)
        response = self.call_endpoint('/person', 'post', data=record)
        self.assertEqual(response.status_code, 201)
        uid = response.get_json()['id']
        response = self.call_endpoint('/person/%s' % uid, 'delete')
        self.assertEqual(response.status_code, 204)

        # The record should still exist, with disabled status
        response = self.call_endpoint('/person/%s' % uid, 'get')
        result = response.get_json()
        del result['created']
        expected.update(record)
        expected['id'] = uid
        self.assertEqual(result, expected)

        # Force delete -- should no longer exist
        response = self.call_endpoint('/person/%s?force=true' % uid, 'delete')
        self.assertEqual(response.status_code, 204)
        response = self.call_endpoint('/person/%s' % uid, 'get')
        self.assertEqual(response.status_code, 404)

    def test_update_primary_contact(self):
        """Updating the primary contact also updates the person's identity."""
        record = dict(name='Witness Protected',
                      identity='oldaccount@conclave.events')
        updated = dict(info='newaccount@conclave.events', type='email')
        expected = dict(
            carrier=None, rank=1, status='unconfirmed', rbac='ru',
            label='home', privacy='member', muted=False,
            owner=record['name'], **updated)

        # Create a new person and fetch the new contact record
        response = self.call_endpoint('/person', 'post', data=record)
        self.assertEqual(response.status_code, 201)
        uid = response.get_json()['id']
        response = self.call_endpoint('/contact?filter={"uid":"%s"}' % uid,
                                      'get')
        self.assertEqual(response.status_code, 200, 'get failed message=%s' %
                         response.get_json().get('message'))
        cid = response.get_json()['items'][0]['id']

        # Update the contact record and confirm contents
        response = self.call_endpoint('/contact/%s' % cid, 'put',
                                      data=dict(uid=uid, **updated))
        self.assertEqual(response.status_code, 200, 'put failed message=%s' %
                         response.get_json().get('message'))
        response = self.call_endpoint('/contact/%s' % cid, 'get')
        self.assertEqual(response.status_code, 200, 'get failed message=%s' %
                         response.get_json().get('message'))
        result = response.get_json()
        del result['created']
        del result['modified']
        self.assertEqual(result, dict(id=cid, uid=uid, **expected))
        self.mock_messaging.assert_has_calls([
            mock.call(to=cid, template='contact_add', token=mock.ANY,
                      type='email')])

        # Now make sure the identity also got updated
        response = self.call_endpoint('/person/%s' % uid, 'get')
        self.assertEqual(response.status_code, 200, 'get failed message=%s' %
                         response.get_json().get('message'))
        result = response.get_json()
        self.assertEqual(result['identity'], updated['info'])

    def test_person_update_disallowed_upon_confirm(self):
        """One person refers another, by invoking POST to /person with
        a name and email address. Update/delete is allowed by either
        person until the referred person confirms or RSVPs to an event.
        """
        invited = dict(name='Phil Acolyte', identity='acolyte@conclave.events')
        updated = dict(name='El Diablo', privacy='invitee')
        response = self.call_endpoint('/person', 'post', data=invited)
        self.assertEqual(response.status_code, 201)
        uid = response.get_json()['id']
        response = self.call_endpoint('/contact?filter={"info":"%s"}' %
                                      invited['identity'], 'get')
        result = response.get_json()
        cid = result['items'][0]['id']
        response = self.call_endpoint('/contact/confirmation_get/%s' %
                                      cid, 'get')
        self.assertEqual(response.status_code, 200)
        self.mock_messaging.assert_has_calls([
            mock.call(to=cid, template='contact_add', token=mock.ANY,
                      type='email')])
        token = response.get_json()['token']
        response = self.call_endpoint('/contact/confirm/%s' % token, 'post')
        self.assertEqual(response.status_code, 200)
        # After confirmation, the referrer may no longer modify the person.
        response = self.call_endpoint('/person/%s' % uid, 'put', data=dict(
            invited, **updated))
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.get_json().get('message'), 'access denied')

    def test_add_invalid(self):
        """Posting a read-only property (id) is rejected."""
        record = dict(name='Rogue',
                      identity='r@conclave.events', id='x-notvalid')
        expected = dict(message='id is a read-only property',
                        title='Bad Request')
        response = self.call_endpoint('/person', 'post', data=record)
        self.assertEqual(response.status_code, 405)
        self.assertEqual(response.get_json(), expected)

    def test_add_duplicate_person(self):
        """Creating the same identity twice yields a conflict error."""
        record = dict(name='Mary Jones', identity='maryj@conclave.events')
        expected = dict(
            lists=[], status='active', privacy='public',
            referrer_id=self.test_uid, rbac='dru', **record)
        response = self.call_endpoint('/person', 'post', data=record)
        self.assertEqual(response.status_code, 201)
        uid = response.get_json()['id']
        response = self.call_endpoint('/person/%s' % uid, 'get')
        result = response.get_json()
        del result['created']
        expected['id'] = uid
        self.assertEqual(result, expected)
        response = self.call_endpoint('/person', 'post', data=record)
        self.assertEqual(response.status_code, 405)
        result = response.get_json()
        del result['data']
        self.assertEqual(result, dict(message='duplicate or other conflict'))


# TODO - test that access disabled after contact-confirm
| instantlinux/apicrud | tests/test_people.py | test_people.py | py | 8,829 | python | en | code | 2 | github-code | 13 |
7512718219 | # -*- coding:utf-8 -*-
# author:peng
# Date:2023/4/4 11:50
import time
import cv2
import numpy as np
from flask import request, Flask, render_template
from flask_cors import CORS
from Mtcnn_interface import mtcnn_detector, face_recognition, list_to_json
# Flask application instance for the face-recognition HTTP service.
app = Flask(__name__)
# Allow cross-origin requests (CORS).
CORS(app)
@app.route("/recognition", methods=['POST'])
def recognition():
start_time1 = time.time()
upload_file = request.files['image']
if upload_file:
try:
img = cv2.imdecode(np.frombuffer(upload_file.read(), np.uint8), 1)
except:
return str({"error": 2, "msg": "this file is not image"})
try:
info_name, dis, info_bbox, info_landmarks = face_recognition(img)
if len(info_name) == 0:
return str({"error": 3, "msg": "image not have face"})
except:
return str({"error": 3, "msg": "image not have face"})
# 封装识别结果
data_faces = []
for i in range(len(info_name)):
data_faces.append(
{"name": info_name[i], "distance": dis[i],
"bbox": list_to_json(np.around(info_bbox[i], decimals=2).tolist()),
"landmarks": list_to_json(np.around(info_landmarks[i], decimals=2).tolist())})
data = str({"code": 0, "msg": "success", "data": data_faces}).replace("'", '"')
print('duration:[%.0fms]' % ((time.time() - start_time1) * 1000), data)
return data
else:
return str({"error": 1, "msg": "file is None"})
# Face registration endpoint.
@app.route("/register", methods=['POST'])
def register():
    """Register a face image under the submitted ``name``.

    Expects a multipart upload with an ``image`` file and a ``name`` form
    field.  Exactly one face must be detected; its aligned crop is saved to
    ``./face_save/<name>.jpg``.

    Fixes: ``probs.shape[0] is not 0`` was an identity comparison against an
    int literal (a SyntaxWarning on modern Python) -- replaced with ``!=``;
    the zero-face path previously fell through and returned None (a Flask
    500) -- it now reports code 3; the bare ``except:`` was narrowed.
    """
    global faces_db
    upload_file = request.files['image']
    user_name = request.values.get("name")
    if not upload_file:
        return str({"code": 1, "msg": "file is None"})
    try:
        image = cv2.imdecode(np.frombuffer(upload_file.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        boxes, probs, landmarks = mtcnn_detector.detect(image, landmarks=True)
        if probs.shape[0] != 0:
            if probs.shape[0] == 1:
                mtcnn_detector(image, save_path='./face_save/{}.jpg'.format(user_name))
                return str({"code": 0, "msg": "success"})
            return str({"code": 3, "msg": "image not or much face"})
        return str({"code": 3, "msg": "image not or much face"})
    except Exception:
        # Decode failures and detector errors (e.g. no face at all) land here.
        return str({"code": 2, "msg": "this file is not image or not face"})
@app.route('/')
def home():
    """Serve the front-end index page."""
    page = "index.html"
    return render_template(page)
if __name__ == '__main__':
    # Development server, reachable only from the local machine.
    app.run(host='localhost', port=5000)
| OpenHUTB/customs | mtcnn-facenet-pytorch/server_main.py | server_main.py | py | 2,553 | python | en | code | 5 | github-code | 13 |
17041222854 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.JointAccountQuotaDTO import JointAccountQuotaDTO
from alipay.aop.api.domain.AuthorizedRuleDTO import AuthorizedRuleDTO
class AlipayFundJointaccountRuleModifyModel(object):
    """Request model for the ``alipay.fund.jointaccount.rule.modify`` API.

    Follows the Alipay SDK's generated-model conventions: private backing
    fields with property accessors, ``to_alipay_dict`` for serialisation
    and ``from_alipay_dict`` for deserialisation.
    """
    def __init__(self):
        self._account_id = None
        self._account_name = None
        self._account_quota = None
        self._agreement_no = None
        self._authorized_rule = None
        self._biz_scene = None
        self._product_code = None
    @property
    def account_id(self):
        return self._account_id
    @account_id.setter
    def account_id(self, value):
        self._account_id = value
    @property
    def account_name(self):
        return self._account_name
    @account_name.setter
    def account_name(self, value):
        self._account_name = value
    @property
    def account_quota(self):
        return self._account_quota
    @account_quota.setter
    def account_quota(self, value):
        # List field: coerce each element into a JointAccountQuotaDTO.
        if isinstance(value, list):
            self._account_quota = list()
            for i in value:
                if isinstance(i, JointAccountQuotaDTO):
                    self._account_quota.append(i)
                else:
                    self._account_quota.append(JointAccountQuotaDTO.from_alipay_dict(i))
    @property
    def agreement_no(self):
        return self._agreement_no
    @agreement_no.setter
    def agreement_no(self, value):
        self._agreement_no = value
    @property
    def authorized_rule(self):
        return self._authorized_rule
    @authorized_rule.setter
    def authorized_rule(self, value):
        if isinstance(value, AuthorizedRuleDTO):
            self._authorized_rule = value
        else:
            self._authorized_rule = AuthorizedRuleDTO.from_alipay_dict(value)
    @property
    def biz_scene(self):
        return self._biz_scene
    @biz_scene.setter
    def biz_scene(self, value):
        self._biz_scene = value
    @property
    def product_code(self):
        return self._product_code
    @product_code.setter
    def product_code(self, value):
        self._product_code = value
    def to_alipay_dict(self):
        """Serialise set (truthy) fields into a plain dict for the API call."""
        params = dict()
        if self.account_id:
            if hasattr(self.account_id, 'to_alipay_dict'):
                params['account_id'] = self.account_id.to_alipay_dict()
            else:
                params['account_id'] = self.account_id
        if self.account_name:
            if hasattr(self.account_name, 'to_alipay_dict'):
                params['account_name'] = self.account_name.to_alipay_dict()
            else:
                params['account_name'] = self.account_name
        if self.account_quota:
            # List field: elements are converted to dicts in place first.
            if isinstance(self.account_quota, list):
                for i in range(0, len(self.account_quota)):
                    element = self.account_quota[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.account_quota[i] = element.to_alipay_dict()
            if hasattr(self.account_quota, 'to_alipay_dict'):
                params['account_quota'] = self.account_quota.to_alipay_dict()
            else:
                params['account_quota'] = self.account_quota
        if self.agreement_no:
            if hasattr(self.agreement_no, 'to_alipay_dict'):
                params['agreement_no'] = self.agreement_no.to_alipay_dict()
            else:
                params['agreement_no'] = self.agreement_no
        if self.authorized_rule:
            if hasattr(self.authorized_rule, 'to_alipay_dict'):
                params['authorized_rule'] = self.authorized_rule.to_alipay_dict()
            else:
                params['authorized_rule'] = self.authorized_rule
        if self.biz_scene:
            if hasattr(self.biz_scene, 'to_alipay_dict'):
                params['biz_scene'] = self.biz_scene.to_alipay_dict()
            else:
                params['biz_scene'] = self.biz_scene
        if self.product_code:
            if hasattr(self.product_code, 'to_alipay_dict'):
                params['product_code'] = self.product_code.to_alipay_dict()
            else:
                params['product_code'] = self.product_code
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build a model instance from a response/request dict, or None."""
        if not d:
            return None
        o = AlipayFundJointaccountRuleModifyModel()
        if 'account_id' in d:
            o.account_id = d['account_id']
        if 'account_name' in d:
            o.account_name = d['account_name']
        if 'account_quota' in d:
            o.account_quota = d['account_quota']
        if 'agreement_no' in d:
            o.agreement_no = d['agreement_no']
        if 'authorized_rule' in d:
            o.authorized_rule = d['authorized_rule']
        if 'biz_scene' in d:
            o.biz_scene = d['biz_scene']
        if 'product_code' in d:
            o.product_code = d['product_code']
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayFundJointaccountRuleModifyModel.py | AlipayFundJointaccountRuleModifyModel.py | py | 4,994 | python | en | code | 241 | github-code | 13 |
14273589266 | #python
#This script creates a weight map based on the length of a curve (modo script)
import lx, lxu.select, lxu.object
#Get the selected mesh item.
#NOTE(review): assumes at least one item is selected; raises IndexError otherwise.
meshItem = lxu.select.ItemSelection().current()[0]
#Get the current scene and create a channel read object
scene = meshItem.Context()
chanRead = scene.Channels(None, 0)
#Lookup the 'crvGroup' channel of the mesh item, create a value object from it
crvChan = meshItem.ChannelLookup('crvGroup')
crvValObj = chanRead.ValueObj(meshItem, crvChan)
#Create a Value Reference object out of the Value object from our curve
valRef = lxu.object.ValueReference(crvValObj)
#Generate an unknown object using the Value Reference
vrObj = valRef.GetObject()
#Cast the unknown object as a curve group (since we know that's what it actually is)
curveGroup = lxu.object.CurveGroup(vrObj)
#Iterate over every curve of the mesh in the curve group
#(the original comment said "first curve", but the loop visits all of them)
for c in xrange(curveGroup.Count()):
    curve = curveGroup.ByIndex(c)
    #print the curve's Length() to the event log
lx.out(curve.Length()) | Tilapiatsu/modo-tila_customconfig | Scripts/CreateCurveLengthWMap.py | CreateCurveLengthWMap.py | py | 1,006 | python | en | code | 2 | github-code | 13 |
44726269575 | import kgit, sys, os
#
# list_w
#
# Used to list all current workspaces stored in the data store
def list_w():
    """Print every workspace stored in the data store, framed by rule lines."""
    stored = kgit.get_file("workspaces")
    # Nothing recorded yet: empty file or a single blank line.
    if stored in ("", "\n"):
        kgit.out("No workspaces available")
        return
    divider = "==============================================="
    kgit.out(divider)
    for entry in stored.split("\n"):
        if entry:
            kgit.out(entry)
    kgit.out(divider)
#
# add_w
#
# Will add a workspace to the data store
def add_w():
    """Add a workspace path to the data store.

    The path comes from ``sys.argv[3]`` when present, otherwise the user is
    prompted.  ``~`` is expanded *before* the duplicate check so that
    ``~/ws`` and its expanded form are recognised as the same workspace.
    """
    workspaces = kgit.get_file("workspaces")
    if len(sys.argv) < 4:
        workspace = raw_input("Workspace: ")
    else:
        workspace = sys.argv[3]
    # Expand "~" first; previously the duplicate check compared the raw
    # input against the (already expanded) stored lines, so the same
    # directory could be added twice under two spellings.
    workspace = workspace.replace("~", os.path.expanduser('~'))
    for line in workspaces.split("\n"):
        if line == workspace:
            kgit.out("Workspace exists - Not adding duplicate")
            return
    workspaces += workspace + "\n"
    kgit.out("Adding workspace: " + workspace)
    kgit.out("Workspaces are stored in ~/.kgit/workspaces")
    kgit.write_data(workspaces, "workspaces")
#
# delete_w
#
# Will delete a workspace by criteria, where criteria is any string that can be matched on a record
def delete_w():
    """Delete every workspace whose line contains *criteria* (substring match).

    Prompts for confirmation before writing the filtered list back and then
    re-lists the remaining workspaces.
    """
    workspaces = kgit.get_file("workspaces")
    # Criteria from argv[3] or interactive prompt.
    if len(sys.argv) < 4:
        criteria = raw_input("Criteria: ")
    else:
        criteria = sys.argv[3]
    kept = []
    delta = False
    for line in workspaces.split("\n"):
        # Substring match; blank lines are structural and always kept.
        if line != "" and criteria in line:
            delta = True
            kgit.out("Removing: " + line)
            continue
        kept.append(line)
    if not delta:
        kgit.out("No workspaces matching criteria found. Unable to delete.")
        return
    yn = raw_input("Continue? [y/n] : ")
    if yn != "y":
        return
    # Okay we have confirmed, lets save.
    # Join with newlines -- the old code concatenated the surviving lines
    # without separators, corrupting the store whenever anything was deleted.
    kgit.write_data("\n".join(kept), "workspaces")
    list_w()
| krisnova/kgit | kgit/workspaces.py | workspaces.py | py | 2,140 | python | en | code | 1 | github-code | 13 |
12727869635 | from selenium.webdriver import Firefox
from contextlib import closing
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from pyvirtualdisplay import Display
URL = 'http://fanserials.tv/index.php?do=search'
series = [
# ('name', 'season', 'series')
('гримм', '4', '21')
]
def find_series_fanserials(browser, data):
    """Search fanserials for one (show, season, episode) tuple.

    Types the query into the site's search box, scans the result articles
    and prints "title | url" for the last match found, or a single dot when
    nothing matched.
    """
    show, season, episode = data[0], data[1], data[2]
    query = show + ' ' + season + ' сезон ' + episode + ' серия'
    result = ''
    search_box = WebDriverWait(browser, 5).until(
        EC.element_to_be_clickable((By.NAME, 'story'))
    )
    search_box.clear()
    search_box.send_keys(query)
    search_box.send_keys(Keys.RETURN)
    try:
        for article in browser.find_elements_by_css_selector('article'):
            title = article.find_element_by_tag_name('h5')
            if query.lower() in title.text.lower():
                link = title.find_element_by_partial_link_text(season + ' сезон')
                result = link.text + ' | ' + link.get_attribute('href')
    except NoSuchElementException:
        # Best effort: a result card without the expected markup yields no hit.
        pass
    if result:
        print('\n', result)
    else:
        print('.', end='')
def main():
    """Run a headless browser search for every entry in ``series``."""
    virtual_screen = Display(visible=0, size=(800, 600))
    virtual_screen.start()
    with closing(Firefox()) as browser:
        print('\n[' + URL + ']')
        browser.get(URL)
        for entry in series:
            find_series_fanserials(browser, entry)
    virtual_screen.stop()
if __name__ == "__main__":
    main()
| idfumg/series-finder-selenium | run.py | run.py | py | 1,686 | python | en | code | 0 | github-code | 13 |
28880432025 | from .constants import BLACK, ROWS, COLS, SQUARE_SIZE, WHITE, DARK_BEIGE, LIGHT_BEIGE
from .piece import Piece
import pygame
import math
import random
class Board:
    """Checkers board state plus the bookkeeping used by Monte Carlo Tree Search.

    The board is an 8x8 grid stored as a 2-D list: empty squares hold ``0``
    and occupied squares hold a :class:`Piece`.  The MCTS fields
    (``score``/``visits``/``parent``/``children``) let a Board double as a
    node in the search tree.
    """

    def __init__(self):
        self.board = []
        self.selected_piece = None
        self.black_left = self.white_left = 12
        self.black_kings = 0
        self.white_kings = 0
        # --- MCTS bookkeeping ---
        self.score = 0
        self.visits = 0
        self.parent = None
        self.parent_action = None
        self.children = []
        self.create_board()

    def reset_mcts(self):
        """
        Reset all variables relevant to Monte Carlo Tree Search
        """
        self.parent = None
        self.score = 0
        self.visits = 0
        self.parent_action = None
        self.children = []

    def set_parent(self, board):
        """Attach this node to *board* as its MCTS parent."""
        self.parent = board

    def get_parent(self):
        """Return this node's MCTS parent (or None for the root)."""
        return self.parent

    def increment_visit(self):
        """Record one more MCTS visit of this node."""
        self.visits += 1

    def add_to_total_score(self, score):
        """Accumulate *score* into this node's total rollout score."""
        self.score += score

    def add_children(self, children):
        """Append *children* to this node, setting their parent pointers."""
        for c in children:
            c.set_parent(self)
        self.children.extend(children)

    def get_children_len(self):
        """Return the number of expanded children."""
        return len(self.children)

    def get_children(self):
        """Return the list of expanded children."""
        return self.children

    def get_average_score(self):
        """Return the (integer) average rollout score per visit."""
        return self.score // self.visits

    def get_score(self):
        """Return the accumulated rollout score."""
        return self.score

    def get_visits(self):
        """Return how often this node has been visited."""
        return self.visits

    def get_best_child(self, searches):
        """
        Get child with best score. (Monte Carlo Tree Search)
        UCB = (Average_Score) + (2 * sqrt((ln(N) / visits)
        :param searches: N
        :return: Child with best score
        """
        if len(self.children) == 0:
            return None
        best = float('-inf')
        best_child = None
        all_zero = True
        # Initialise ucb so the comparisons below are defined even when the
        # first children examined have never been visited (the previous code
        # raised NameError in that case).
        ucb = float('-inf')
        for c in self.children:
            all_zero = False
            if c.get_visits() == 0:
                # Unvisited children are explored first (treated as infinite UCB).
                best = float('inf')
                best_child = c
            else:
                ln = math.log(searches)
                ucb = c.get_average_score() + (2 * math.sqrt((ln / c.get_visits())))
                best = max(best, ucb)
            if best > 0:
                all_zero = False
            if best == ucb:
                best_child = c
        if all_zero:
            # NOTE(review): unreachable whenever children is non-empty, since
            # all_zero is cleared at the top of the loop; kept as a safety net.
            return random.choice(self.children)
        return best_child

    def draw_board(self, window):
        """
        Create the initial 8x8 board visual in the game window
        :param window: pygame display window (800x800 pixels)
        """
        window.fill(DARK_BEIGE)
        for row in range(ROWS):
            for col in range(row % 2, COLS, 2):
                pygame.draw.rect(window, LIGHT_BEIGE, (row * SQUARE_SIZE, col * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))

    def create_board(self):
        """
        Initialize board array (2D) with empty spaces represented by 0 otherwise a new piece
        Adhering to legal starting positions for checkers
        """
        for row in range(ROWS):
            self.board.append([])
            for col in range(COLS):
                if col % 2 == ((row + 1) % 2):
                    if row < 3:
                        piece = Piece(row, col, WHITE)
                        self.board[row].append(piece)
                    elif row > 4:
                        piece = Piece(row, col, BLACK)
                        self.board[row].append(piece)
                    else:
                        self.board[row].append(0)
                else:
                    self.board[row].append(0)

    def draw(self, window):
        """
        Create the initial 8x8 board visual and pieces in the game window
        :param window: pygame display window (800x800 pixels)
        """
        self.draw_board(window)
        for row in range(ROWS):
            for col in range(COLS):
                piece = self.board[row][col]
                if piece != 0:
                    piece.draw(window)

    def white_heuristic_eval_1(self):
        """
        Heuristic evaluation focusing on piece count
        :return: Heuristic evaluation
        """
        return self.white_left - self.black_left

    def white_heuristic_eval_2(self):
        """
        Heuristic evaluation focusing on overall piece count and number of kings
        :return: Heuristic evaluation
        """
        return self.white_left - self.black_left + (self.white_kings * .99 - self.black_kings * .99)

    def white_heuristic_eval_3(self):
        """
        Heuristic evaluation focusing on overall piece count, number of kings, and positioning on the board
        :return: Heuristic evaluation
        """
        score = 0
        for row in self.board:
            for piece in row:
                if piece != 0 and piece.color == WHITE:
                    if not piece.king:
                        # Reward advancement towards the far side of the board.
                        if piece.row > 3:
                            score += 2 + piece.row
                        elif piece.row == 0:
                            score += 0
                        else:
                            score += 1 + piece.row
                    else:
                        # A blocked king is a heavy liability.
                        if len(self.get_valid_moves(piece)) == 0:
                            score -= 100
                        else:
                            score += 21
                if piece != 0 and piece.color == BLACK:
                    if not piece.king:
                        if piece.row < 4:
                            score -= 3 + (7 - piece.row)
                        else:
                            score -= 2 + (7 - piece.row)
                    else:
                        score -= 21
        return score

    def black_heuristic_eval_1(self):
        """
        Heuristic evaluation focusing on piece count
        :return: Heuristic evaluation
        """
        return self.black_left - self.white_left

    def black_heuristic_eval_2(self):
        """
        Heuristic evaluation focusing on overall piece count and number of kings
        :return: Heuristic evaluation
        """
        return self.black_left - self.white_left + (self.black_kings * .99 - self.white_kings * .99)

    def black_heuristic_eval_3(self):
        """
        Heuristic evaluation focusing on overall piece count, number of kings, and positioning on the board
        :return: Heuristic evaluation
        """
        score = 0
        for row in self.board:
            for piece in row:
                if piece != 0 and piece.color == BLACK:
                    if not piece.king:
                        # Reward advancement towards row 0 (black moves upwards).
                        if piece.row < 4:
                            score += 2 + (7 - piece.row)
                        else:
                            score += 1 + (7 - piece.row)
                    else:
                        if len(self.get_valid_moves(piece)) == 0:
                            score -= 100
                        else:
                            score += 21
                if piece != 0 and piece.color == WHITE:
                    if not piece.king:
                        if piece.row > 5:
                            score -= 3 + piece.row
                        else:
                            score -= 2 + piece.row
                    else:
                        score -= 21
        return score

    def get_all_pieces(self, color):
        """
        Find all pieces in the board array of certain color
        :param color: Color of wanted pieces
        :return: All pieces of color
        """
        pieces = []
        for row in self.board:
            for piece in row:
                if piece != 0 and piece.color == color:
                    pieces.append(piece)
        return pieces

    def get_all_pieces_move(self, color):
        """
        Find all pieces in the board array of certain color and have at least one valid move
        (not blocked)
        :param color: Color of wanted pieces
        :return: All pieces of color and have at least one available move
        """
        pieces = []
        for row in self.board:
            for piece in row:
                if piece != 0 and piece.color == color and len(self.get_valid_moves(piece)) > 0:
                    pieces.append(piece)
        return pieces

    def move(self, piece, row, col):
        """
        Move selected piece within our board array to a new index
        If move results in a king (piece meeting row 0 or ROW and is corresponding color)
        make piece a king and add to our king count
        :param piece: piece to move
        :param row: new row position
        :param col: new column position
        """
        self.board[piece.row][piece.col], self.board[row][col] = self.board[row][col], self.board[piece.row][piece.col]
        piece.move(row, col)
        if row == ROWS - 1 or row == 0:
            if piece.color == WHITE and not piece.king:
                self.white_kings += 1
                piece.make_king()
            elif piece.color == BLACK and not piece.king:
                self.black_kings += 1
                piece.make_king()

    def remove(self, pieces):
        """
        Remove piece or pieces from board array and update counts
        :param pieces: piece or pieces to be removed
        """
        for piece in pieces:
            self.board[piece.row][piece.col] = 0
            if piece != 0:
                if piece.color == BLACK:
                    if piece.king:
                        self.black_left -= 1
                        self.black_kings -= 1
                    else:
                        self.black_left -= 1
                else:
                    if piece.king:
                        self.white_left -= 1
                        self.white_kings -= 1
                    else:
                        self.white_left -= 1

    def winner(self):
        """
        Check if win condition met (one color has no pieces left)
        :return: the winning color, or None while the game is still going
        """
        if self.black_left <= 0:
            return WHITE
        elif self.white_left <= 0:
            return BLACK
        return None

    def get_piece(self, row, col):
        """Return the board entry at (row, col): a Piece or 0 for empty."""
        return self.board[row][col]

    def get_valid_moves(self, piece):
        """
        Get all possible moves for a piece
        :param piece: piece to search
        :return: available moves for a piece {(row,col) -> [captured pieces]}
        """
        moves = {}
        left = piece.col - 1
        right = piece.col + 1
        row = piece.row
        # Black (and kings) move towards row 0; white (and kings) towards ROWS.
        if piece.color == BLACK or piece.king:
            moves.update(self._traverse_left(row - 1, max(row - 3, -1), -1, piece.color, left))
            moves.update(self._traverse_right(row - 1, max(row - 3, -1), -1, piece.color, right))
        if piece.color == WHITE or piece.king:
            moves.update(self._traverse_left(row + 1, min(row + 3, ROWS), 1, piece.color, left))
            moves.update(self._traverse_right(row + 1, min(row + 3, ROWS), 1, piece.color, right))
        return moves

    def _traverse_left(self, start, stop, step, color, left, skipped=None):
        """Walk the left-hand diagonal collecting legal landing squares
        (including multi-jumps) into a ``{(row, col): [captured pieces]}`` dict."""
        # ``skipped`` previously defaulted to a mutable [] (shared between
        # calls); use the None sentinel instead.
        if skipped is None:
            skipped = []
        moves = {}
        last = []
        for r in range(start, stop, step):
            if left < 0:
                break
            current = self.board[r][left]
            if current == 0:
                if skipped and not last:
                    break
                elif skipped:
                    moves[(r, left)] = last + skipped
                else:
                    moves[(r, left)] = last
                if last:
                    # Just completed a capture: recurse for chained jumps.
                    if step == -1:
                        row = max(r - 3, -1)
                    else:
                        row = min(r + 3, ROWS)
                    if skipped:
                        moves.update(self._traverse_left(r + step, row, step, color, left - 1, skipped=last + skipped))
                        moves.update(self._traverse_right(r + step, row, step, color, left + 1, skipped=last + skipped))
                    else:
                        moves.update(self._traverse_left(r + step, row, step, color, left - 1, skipped=last))
                        moves.update(self._traverse_right(r + step, row, step, color, left + 1, skipped=last))
                break
            elif current.color == color:
                # Blocked by own piece.
                break
            else:
                # Opponent piece: remember it as a potential capture.
                last = [current]
            left -= 1
        return moves

    def _traverse_right(self, start, stop, step, color, right, skipped=None):
        """Walk the right-hand diagonal collecting legal landing squares
        (including multi-jumps) into a ``{(row, col): [captured pieces]}`` dict."""
        if skipped is None:
            skipped = []
        moves = {}
        last = []
        for r in range(start, stop, step):
            if right >= COLS:
                break
            current = self.board[r][right]
            if current == 0:
                if skipped and not last:
                    break
                elif skipped:
                    moves[(r, right)] = last + skipped
                else:
                    moves[(r, right)] = last
                if last:
                    # Just completed a capture: recurse for chained jumps.
                    if step == -1:
                        row = max(r - 3, -1)
                    else:
                        row = min(r + 3, ROWS)
                    if skipped:
                        moves.update(self._traverse_left(r + step, row, step, color, right - 1, skipped=last + skipped))
                        moves.update(
                            self._traverse_right(r + step, row, step, color, right + 1, skipped=last + skipped))
                    else:
                        moves.update(self._traverse_left(r + step, row, step, color, right - 1, skipped=last))
                        moves.update(self._traverse_right(r + step, row, step, color, right + 1, skipped=last))
                break
            elif current.color == color:
                break
            else:
                last = [current]
            right += 1
        return moves
| mh022396/Checkers-AI | src/checkers/board.py | board.py | py | 13,828 | python | en | code | 0 | github-code | 13 |
74760763537 | import streamlit as st
from streamlit_image_coordinates import streamlit_image_coordinates
from PIL import Image, ImageDraw
st.set_page_config(layout="wide")
def get_ellipse_coords(point: tuple[int, int], radius: int = 10) -> tuple[int, int, int, int]:
    """Return the PIL bounding box (left, top, right, bottom) of a circle.

    :param point: (x, y) centre in image pixel coordinates.
    :param radius: circle radius in pixels; defaults to 10, the marker size
        previously hard-coded inside the function.
    """
    x, y = point
    return (
        x - radius,
        y - radius,
        x + radius,
        y + radius,
    )
# Cities to point at on the base map, in the order the user is prompted;
# the recorded click coordinates are stored in this same order.
locations_names = [
    "Paris",
    "Belfort",
    "Rouen",
    "Rennes",
    "Le Mans",
    "Troyes",
    "Poitiers",
    "Limoges",
    "Villeurbanne",
    "Grenoble",
    "Bordeaux",
    "Toulouse",
    "Montpellier",
    "Avignon",
    "Perpignan",
]
def run(
    basemap_path,
):
    """Interactive calibration loop: collect one click per city on the map.

    Each click is drawn as a red dot and stored in Streamlit session state;
    once every city in ``locations_names`` has a point, the name->(x, y)
    mapping is dumped to a JSON file and the app stops.
    """
    # Session state survives Streamlit reruns; initialise on first run only.
    if "points" not in st.session_state:
        st.session_state["points"] = []
    if "idx" not in st.session_state:
        st.session_state["idx"] = 0
    with Image.open(basemap_path) as img:
        draw = ImageDraw.Draw(img)
        # Draw an ellipse at each coordinate in points
        for point in st.session_state["points"]:
            coords = get_ellipse_coords(point)
            draw.ellipse(coords, fill="red")
        st.text("Pointer " + locations_names[st.session_state.idx])
        # Returns the last click position on the image, or None before any click.
        value = streamlit_image_coordinates(img, key="pil")
        if value is not None:
            point = value["x"], value["y"]
            st.text(point)
            if point not in st.session_state["points"]:
                st.session_state["points"].append(point)
                st.session_state.idx += 1
                # All cities pointed: persist the calibration and stop.
                if len(st.session_state.points) == len(locations_names):
                    loc2xy = {
                        loc: point
                        for loc, point in zip(locations_names, st.session_state.points)
                    }
                    import json
                    # NOTE(review): hard-coded, machine-specific output path.
                    with open(
                        "/Users/f.weber/tmp-fweber/heating/aux_maps/ref_carte_name2xy.json",
                        "w",
                    ) as f:
                        json.dump(loc2xy, f)
                    st.stop()
    # st.experimental_rerun()
if __name__ == "__main__":
    # Hard-coded base map for this calibration session.
    run("/Users/f.weber/tmp-fweber/heating/aux_maps/ref_carte_drias_f1.png")
| francoisWeber/heating-planner | display_map_calibration.py | display_map_calibration.py | py | 2,197 | python | en | code | 0 | github-code | 13 |
2856430328 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from valan.streetview_common import streetview_constants
# Hyper-parameters for the baseline (non-panoramic) Touchdown agent.
TD_BASELINE_AGENT_PARAMS = streetview_constants.BaselineAgentParams(
    # Actual vocab size is 4280, we add 1 as vocab_id=0 can not be used since we
    # are masking for RNN performance.
    VOCAB_SIZE=4280 + 1,
    INSTRUCTION_LSTM_DIM=256,
    TEXT_EMBED_DIM=32,
    TIMESTEP_EMBED_DIM=32,
    ACTION_EMBED_DIM=16,
    MAX_AGENT_ACTIONS=55,
    L2_SCALE=0.0,
)
# Hyper-parameters for the panoramic Touchdown agent (LingUNet head).
TD_PANO_AGENT_PARAMS = streetview_constants.PanoramicAgentParams(
    VOCAB_SIZE=4280 + 1,
    INSTRUCTION_LSTM_DIM=256,
    GROUNDING_EMBEDDING_DIM=256,  # Unused in Touchdown
    FEATURE_H=3,
    FEATURE_W=18,
    # FEATURE_C=64,  # Starburst v4
    FEATURE_C=606,  # Bottleneck v7
    # FEATURE_H=1,
    # FEATURE_W=8,
    # FEATURE_C=2048,  # ResNet50
    LINGUNET_H=64,
    LINGUNET_G=16,
    TIME_LSTM_DIM=32,
    TEXT_EMBED_DIM=32,
    TIMESTEP_EMBED_DIM=32,
    ACTION_EMBED_DIM=16,
    MAX_AGENT_ACTIONS=55,
    L2_SCALE=0.0,
)
# Larger-capacity variant with a bigger vocabulary and LingUNet dims.
# NOTE(review): 30522 is presumably the BERT WordPiece vocab size -- confirm.
TD_PANO_AGENT_PARAMS_BIG = streetview_constants.PanoramicAgentParams(
    VOCAB_SIZE=30522+1,
    INSTRUCTION_LSTM_DIM=256,
    GROUNDING_EMBEDDING_DIM=256,
    FEATURE_H=3,
    FEATURE_W=18,
    FEATURE_C=606,
    LINGUNET_H=128,
    LINGUNET_G=64,
    TIME_LSTM_DIM=32,
    TEXT_EMBED_DIM=32,
    TIMESTEP_EMBED_DIM=32,
    ACTION_EMBED_DIM=16,
    MAX_AGENT_ACTIONS=55,
    L2_SCALE=0.0,
)
| google-research/valan | touchdown/constants.py | constants.py | py | 1,481 | python | en | code | 69 | github-code | 13 |
70858437138 | import atexit
import pathlib
import warnings
from typing import Any, Callable, Dict, List, Tuple, Union
# 3rd party
from apeye.requests_url import RequestsURL
from apeye.slumber_url import HttpNotFoundError, SlumberURL
from apeye.url import URL
from domdf_python_tools.paths import PathPlus
from domdf_python_tools.typing import PathLike
from packaging.requirements import InvalidRequirement
from packaging.specifiers import SpecifierSet
from packaging.tags import Tag, sys_tags
from packaging.utils import parse_wheel_filename
from packaging.version import Version
from typing_extensions import TypedDict
# this package
from shippinglabel import normalize
from shippinglabel.requirements import operator_symbols, read_requirements
__all__ = [
"PYPI_API",
"get_metadata",
"get_latest",
"bind_requirements",
"get_pypi_releases",
"get_releases_with_digests",
"get_file_from_pypi",
"FileURL",
"get_sdist_url",
"get_wheel_url",
"get_wheel_tag_mapping",
]
warnings.warn(
"shippinglabel.pypi is deprecated and will be removed in v2.0.0.\n"
"Please use shippinglabel-pypi instead.",
DeprecationWarning,
)
PYPI_API = SlumberURL("https://pypi.org/pypi/", timeout=10)
"""
Instance of :class:`apeye.slumber_url.SlumberURL` which points to the PyPI REST API.
.. versionchanged:: 0.3.0 Now an instance of :class:`apeye.slumber_url.SlumberURL`.
"""
atexit.register(PYPI_API.session.close)
class FileURL(TypedDict):
    """
    :class:`typing.TypedDict` representing the output of :func:`~.get_releases_with_digests`.
    .. versionadded:: 0.6.1
    """
    # Download URL of the release file on PyPI.
    url: str
    # SHA256 hex digest of the file contents.
    digest: str
def get_metadata(pypi_name: str) -> Dict[str, Any]:
    """
    Return the JSON metadata for *pypi_name* from the PyPI REST API.
    .. versionadded:: 0.2.0
    :param pypi_name:
    :raises:
        * :exc:`packaging.requirements.InvalidRequirement` if the project cannot be found on PyPI.
        * :exc:`apeye.slumber_url.exceptions.HttpServerError` if an error occurs when communicating with PyPI.
    """
    endpoint: SlumberURL = PYPI_API / pypi_name / "json"
    try:
        metadata = endpoint.get()
    except HttpNotFoundError:
        # A 404 means the project does not exist; surface that as an
        # InvalidRequirement without the HTTP traceback chained on.
        raise InvalidRequirement(f"No such project {pypi_name!r}") from None
    return metadata
def get_latest(pypi_name: str) -> str:
    """
    Return the version string of *pypi_name*'s latest release on PyPI.
    .. versionadded:: 0.2.0
    :param pypi_name:
    :raises:
        * :exc:`packaging.requirements.InvalidRequirement` if the project cannot be found on PyPI.
        * :exc:`apeye.slumber_url.exceptions.HttpServerError` if an error occurs when communicating with PyPI.
    """
    info = get_metadata(pypi_name)["info"]
    return str(info["version"])
def bind_requirements(
        filename: PathLike,
        specifier: str = ">=",
        normalize_func: Callable[[str], str] = normalize,
        ) -> int:
    """
    Bind unbound requirements in the given file to the latest version on PyPI, and any later versions.
    .. versionadded:: 0.2.0
    :param filename: The requirements.txt file to bind requirements in.
    :param specifier: The requirement specifier symbol to use.
    :param normalize_func: Function to use to normalize the names of requirements.
    .. versionchanged:: 0.2.3 Added the ``normalize_func`` keyword-only argument.
    :return: ``1`` if the file was changed; ``0`` otherwise.
    """
    if specifier not in operator_symbols:
        raise ValueError(f"Invalid specifier {specifier!r}")
    # ret is OR-ed with 1 whenever anything changed (a requirement was bound
    # or the file content differs after sorting/regrouping).
    ret = 0
    filename = PathPlus(filename)
    requirements, comments, invalid_lines = read_requirements(
        filename,
        include_invalid=True,
        normalize_func=normalize_func,
        )
    for req in requirements:
        # URL requirements cannot be version-bound.
        if req.url:
            continue
        if not req.specifier:
            ret |= 1
            # Pin to the newest release and allow anything later.
            req.specifier = SpecifierSet(f"{specifier}{get_latest(req.name)}")
    sorted_requirements = sorted(requirements)
    # Rebuild the file: comments first, then invalid lines, then sorted reqs.
    buf: List[str] = [*comments, *invalid_lines, *(str(req) for req in sorted_requirements)]
    # Only rewrite when the (blank-line-stripped) content actually changed.
    if buf != list(filter(lambda x: x != '', filename.read_lines())):
        ret |= 1
        filename.write_lines(buf)
    return ret
def get_pypi_releases(pypi_name: str) -> Dict[str, List[str]]:
    """
    Return a mapping of PyPI release versions to lists of download URLs.
    .. versionadded:: 0.3.0
    :param pypi_name: The name of the project on PyPI.
    :raises:
        * :exc:`packaging.requirements.InvalidRequirement` if the project cannot be found on PyPI.
        * :exc:`apeye.slumber_url.exceptions.HttpServerError` if an error occurs when communicating with PyPI.
    """
    # Reuse the digest-aware lookup and keep only the URLs.
    return {
        release: [file["url"] for file in release_data]
        for release, release_data in get_releases_with_digests(pypi_name).items()
    }
def get_releases_with_digests(pypi_name: str) -> Dict[str, List[FileURL]]:
    """
    Return a mapping of PyPI release versions to download URLs plus the sha256sum of each file.
    .. versionadded:: 0.6.0
    :param pypi_name: The name of the project on PyPI.
    :raises:
        * :exc:`packaging.requirements.InvalidRequirement` if the project cannot be found on PyPI.
        * :exc:`apeye.slumber_url.exceptions.HttpServerError` if an error occurs when communicating with PyPI.
    """
    pypi_releases: Dict[str, List[FileURL]] = {}
    for release, release_data in get_metadata(pypi_name)["releases"].items():
        pypi_releases[release] = [
            {"url": file["url"], "digest": file["digests"]["sha256"]}
            for file in release_data
        ]
    return pypi_releases
def get_file_from_pypi(url: Union[URL, str], tmpdir: PathLike) -> None:
    """
    Download the file with the given URL into the given (temporary) directory.
    .. versionadded:: 0.6.0
    :param url: The URL to download the file from.
    :param tmpdir: The (temporary) directory to store the downloaded file in.
    :raises OSError: if the server does not respond with HTTP 200.
    """
    should_close = False
    if not isinstance(url, RequestsURL):
        # Promote plain URLs/strings to a requests-backed URL; we own (and
        # must close) the session we create here.
        url = RequestsURL(url)
        should_close = True
    try:
        filename = url.name
        r = url.get()
        if r.status_code != 200:  # pragma: no cover
            # The previous message hardcoded "'(unknown)'" even though the
            # filename is known at this point.
            raise OSError(f"Unable to download {filename!r} from PyPI.")
        (pathlib.Path(tmpdir) / filename).write_bytes(r.content)
    finally:
        # Close our session even when the download fails.
        if should_close:
            url.session.close()
def get_sdist_url(
        name: str,
        version: Union[str, int, Version],
        strict: bool = False,
        ) -> str:
    """
    Return the URL of the project's source distribution on PyPI.
    .. versionadded:: 0.13.0
    :param name: The name of the project on PyPI.
    :param version:
    :param strict: Causes a :exc:`ValueError` to be raised if no sdist is found,
        rather than retuning a wheel.
    .. attention::
        If no source distribution is found this function may return a wheel or "zip" sdist
        unless ``strict`` is :py:obj:`True`.
    .. versionchanged:: 0.15.0 Added the ``strict`` argument.
    """
    releases = get_pypi_releases(str(name))
    version = str(version)
    if version not in releases:
        raise InvalidRequirement(f"Cannot find version {version} on PyPI.")
    candidates = releases[version]
    if not candidates:
        raise ValueError(f"Version {version} has no files on PyPI.")
    # Prefer a .tar.gz sdist.
    sdist = next((u for u in candidates if u.endswith(".tar.gz")), None)
    if sdist is not None:
        return sdist
    if strict:
        raise ValueError(f"Version {version} has no sdist on PyPI.")
    # Fall back to a .zip sdist, then to whatever file comes first.
    zipped = next((u for u in candidates if u.endswith(".zip")), None)
    return zipped if zipped is not None else candidates[0]
def get_wheel_url(
        name: str,
        version: Union[str, int, Version],
        strict: bool = False,
        ) -> str:
    """
    Return the URL of one of the project's wheels on PyPI.
    For finer control over which wheel the URL is for see the :func:`~.get_wheel_tag_mapping` function.
    .. versionadded:: 0.15.0
    :param name: The name of the project on PyPI.
    :param version:
    :param strict: Causes a :exc:`ValueError` to be raised if no wheels are found,
        rather than retuning a wheel.
    .. attention::
        If no wheels are found this function may return an sdist
        unless ``strict`` is :py:obj:`True`.
    """
    tag_url_map, non_wheel_urls = get_wheel_tag_mapping(name, version)
    # sys_tags() yields tags most-preferred-first, so the first hit wins.
    match = next((tag_url_map[tag] for tag in sys_tags() if tag in tag_url_map), None)
    if match is not None:
        return str(match)
    if strict:
        raise ValueError(f"Version {version} has no wheels on PyPI.")
    if not non_wheel_urls:  # pragma: no cover
        raise ValueError(f"Version {version} has no files on PyPI.")
    return str(non_wheel_urls[0])
def get_wheel_tag_mapping(
        name: str,
        version: Union[str, int, Version],
        ) -> Tuple[Dict[Tag, URL], List[URL]]:
    """
    Construct a mapping of wheel tags to the PyPI download URL of the wheel with the relevant tag.
    This can be used alongside :func:`packaging.tags.sys_tags` to select the best wheel for the current platform.
    .. versionadded:: 0.15.0
    :param name: The name of the project on PyPI.
    :param version:
    :returns: A tuple containing the ``tag: url`` mapping,
        and a list of download URLs for non-wheel artifacts (e.g. sdists).
    """
    releases = get_pypi_releases(str(name))
    version = str(version)
    if version not in releases:
        raise InvalidRequirement(f"Cannot find version {version} on PyPI.")
    file_urls = [URL(u) for u in releases[version]]
    if not file_urls:
        raise ValueError(f"Version {version} has no files on PyPI.")
    tag_url_map: Dict[Tag, URL] = {}
    non_wheel_urls: List[URL] = []
    for file_url in file_urls:
        if file_url.suffix != ".whl":
            non_wheel_urls.append(file_url)
            continue
        # A wheel filename may encode several compatibility tags; map each
        # of them to this wheel's URL.
        for tag in parse_wheel_filename(file_url.name)[3]:
            tag_url_map[tag] = file_url
    return tag_url_map, non_wheel_urls
| domdfcoding/shippinglabel | shippinglabel/pypi.py | pypi.py | py | 9,188 | python | en | code | 1 | github-code | 13 |
21043394096 | import sys
import platform
import bluetooth
import threading
from argparse import ArgumentParser as AP
"""
Basic bluetooth scanner
~ v1
"""
class SimpleBluetooth:
    """Thin namespace exposing the scanner's two scan modes."""

    def __init__(self):
        pass

    @staticmethod
    def basic_scan():
        """Discover nearby devices and print name/address/class for each."""
        cfg = _Config()
        # How many devices were found?...
        print("\033[34m[*]\033[37m Managed to find {} devices...\033[0m".format(cfg.device_number()))
        for addr, name, dc in cfg.device():
            print(
                "\n\t\033[37m-- Device Name:\t\t\t\033[32m{}\033[0m\n"
                "\t\033[37m-- Device Address:\t\t\033[32m{}\033[0m\n"
                "\t\033[37m-- Device Class:\t\t\033[32m{}\033[0m\n\n".format(name, addr, dc)
            )

    @staticmethod
    def add_service_scan():
        """Discover nearby devices, then query each one's services.

        Runs a single discovery pass; previously a second full discovery was
        performed just to obtain the device count, doubling the scan time and
        risking a count inconsistent with the devices actually listed.
        """
        devices = _Config.basic_device_setup()
        print("\033[34m[*]\033[37m Managed to find {} devices...\033[0m".format(len(devices)))
        for addr, name in devices:
            print("\t\033[37mFor Name:\t\t\033[33m{}\033[0m\n"
                  "\t\033[37mFor Address:\t\t\033[33m{}\033[0m".format(name, addr))
            # Query the device's service records.
            srv = bluetooth.find_service(address=addr)
            if len(srv) == 0:
                print("\t\033[31mNo services found for device...\033[0m\n")
            else:
                print("\t\033[37mService(s) found:\n\t\033[33m{}\033[0m\n\n".format(srv))
class _Config:
    """Holds discovery results and small static helpers for the CLI."""

    def __init__(self):
        # Blocking inquiry scan; yields (address, name, device_class) tuples.
        self.devices = bluetooth.discover_devices(lookup_names=True, lookup_class=True)
        self.devno = len(self.devices)

    def device(self):
        """Return the list of (address, name, class) tuples from the scan."""
        return self.devices

    def device_number(self):
        """Return how many devices the scan found."""
        return self.devno

    @staticmethod
    def basic_device_setup():
        """Run a 5-second discovery and return (address, name) tuples."""
        return bluetooth.discover_devices(duration=5, lookup_names=True)

    @staticmethod
    def basic_device_setup_list():
        """Return the device count from a fresh 5-second discovery."""
        return len(bluetooth.discover_devices(duration=5, lookup_names=True))

    @staticmethod
    def get_version():
        """Return the module version string."""
        return "v1.0 -- m1"

    @staticmethod
    def check_os():
        """Exit with an error when not running on a supported (non-Windows) OS."""
        # platform.system() returns "Windows" on Windows; the original code
        # compared against "win32" (a sys.platform value), so this guard
        # could never fire.
        if platform.system() == "Windows":
            print("Please run on Linux...")
            exit(1)
def main():
    """Parse command-line flags and dispatch the requested scan."""
    _Config.check_os()
    ops = AP(usage="blues.py [OPTIONS] | -h, --help", conflict_handler="resolve")
    ops.add_argument('-v', '--version', action="store_true", dest="get_version", help="Print module version and exit")
    ops.add_argument('-b', '--basic', action="store_true", dest="init_basic_scan", help="Initialize a basic bluetooth scan")
    ops.add_argument('-s', '--service-scan', action="store_true", dest="add_service_scan", help="Add service scanning to bluetooth scan")
    args = ops.parse_args()
    if args.get_version:
        print("\n\033[37m++ Module Version: {}\033[0m\n".format(_Config.get_version()))
    if args.init_basic_scan:
        print("\033[32m[+]\033[37m Running basic bluetooth scanner...\033[0m")
        # Pass the callable itself: the original wrote target=f(), which ran
        # the scan immediately on the main thread and handed its None return
        # to Thread (and args=(1,) would have crashed a zero-argument target).
        t = threading.Thread(target=SimpleBluetooth.basic_scan)
        t.start()
        t.join()
    if args.add_service_scan:
        print("\033[32m[+]\033[37m Running service scanner...\033[0m")
        t = threading.Thread(target=SimpleBluetooth.add_service_scan)
        t.start()
        t.join()
main() | 0pointNull/bluescan | basic.py | basic.py | py | 3,285 | python | en | code | 0 | github-code | 13 |
14122389849 | import unittest
import numpy as np
from pyfda.libs import pyfda_fix_lib as fx
from pyfda.fixpoint_widgets.fir_df import FIR_DF_wdg
class TestSequenceFunctions(unittest.TestCase):
    def setUp(self):
        # Quantizer config: 0 integer bits, 3 fractional bits, saturating
        # overflow, round-to-nearest quantization, decimal display, unit scale.
        q_dict = {'WI':0, 'WF':3, 'ovfl':'sat', 'quant':'round', 'fx_base': 'dec', 'scale': 1}
        self.myQ = fx.Fixed(q_dict) # instantiate fixpoint object with settings above
        # Real-valued stimuli spanning both saturation limits.
        self.y_list = [-1.1, -1.0, -0.5, 0, 0.5, 0.9, 0.99, 1.0, 1.1]
        # Complex-valued stimuli for the same ranges.
        self.y_list_cmplx = [-1.1j + 0.1, -1.0 - 0.3j, -0.5-0.5j, 0j, 0.5j, 0.9, 0.99+0.3j, 1j, 1.1]
        # list with various invalid strings
        self.y_list_validate = ['1.1.1', 'xxx', '123', '1.23', '', 1.23j + 3.21, '3.21 + 1.23 j']
        # Device under test (fixpoint FIR direct-form widget class).
        self.dut = FIR_DF_wdg
#
# def test_shuffle(self):
# # make sure the shuffled sequence does not lose any elements
# random.shuffle(self.seq)
# self.seq.sort()
# self.assertEqual(self.seq, range(10))
#
# def test_choice(self):
# element = random.choice(self.seq)
# self.assertTrue(element in self.seq)
#
# def test_sample(self):
# self.assertRaises(ValueError, random.sample, self.seq, 20)
# for element in random.sample(self.seq, 5):
# self.assertTrue(element in self.seq)
    def test_write_q_dict(self):
        """
        Check whether parameters are written correctly to the fixpoint instance
        """
        q_dict = {'WI':7, 'WF':3, 'ovfl':'none', 'quant':'fix', 'fx_base': 'hex', 'scale': 17}
        self.myQ.set_qdict(q_dict)
        # self.assertEqual(q_dict, self.myQ.q_obj)
        # check whether Q : 7.3 is resolved correctly as WI:7, WF: 3
        # NOTE(review): the dict below actually sets Q='6.2', not '7.3' as the
        # comment above says -- confirm which was intended.
        q_dict2 = {'Q': '6.2'}
        self.myQ.set_qdict(q_dict2)
        # self.assertEqual(q_dict2, self.myQ.q_obj)
        # Setting the total word length W=13 should yield WI=12, WF=0
        # (one bit is the sign bit).
        self.myQ.set_qdict({'W': 13})
        self.assertEqual(12, self.myQ.q_dict['WI'])
        self.assertEqual(0, self.myQ.q_dict['WF'])
        self.assertEqual('12.0', self.myQ.q_dict['Q'])
        # check whether option 'norm' sets the correct scale
        self.myQ.set_qdict({'scale':'norm'})
        self.assertEqual(2**(-self.myQ.q_dict['WI']), self.myQ.q_dict['scale'])
        # check whether option 'int' sets the correct scale
        self.myQ.set_qdict({'scale':'int'})
        self.assertEqual(1<<self.myQ.q_dict['WF'], self.myQ.q_dict['scale'])
def test_fix_no_ovfl(self):
"""
Test the actual fixpoint quantization without saturation / wrap-around. The 'fx_base'
keyword is not regarded here.
"""
# return fixpoint numbers as float (no saturation, no quantization)
q_dict = {'WI':0, 'WF':3, 'ovfl':'none', 'quant':'none', 'fx_base': 'dec', 'scale': 1}
self.myQ.set_qdict(q_dict)
# test handling of invalid inputs - scalar inputs
yq_list = list(map(self.myQ.fixp, self.y_list_validate))
yq_list_goal = [0, 0, 123.0, 1.23, 0, 3.21, 3.21]
self.assertEqual(yq_list, yq_list_goal)
# same in vector format
yq_list = list(self.myQ.fixp(self.y_list_validate))
yq_list_goal = [0, 0, 123.0, 1.23, 0, 3.21, 3.21]
self.assertListEqual(yq_list, yq_list_goal)
# return fixpoint numbers as float (no saturation, no quantization)
# use global list
yq_list = list(self.myQ.fixp(self.y_list))
yq_list_goal = self.y_list
self.assertEqual(yq_list, yq_list_goal)
# test scaling (multiply by scaling factor)
q_dict = {'scale': 2}
self.myQ.set_qdict(q_dict)
yq_list = list(self.myQ.fixp(self.y_list) / 2.)
self.assertEqual(yq_list, yq_list_goal)
# test scaling (divide by scaling factor)
yq_list = list(self.myQ.fixp(self.y_list, scaling='div') * 2.)
self.assertEqual(yq_list, yq_list_goal)
# return fixpoint numbers as float (rounding)
q_dict = {'quant':'round', 'scale': 1}
self.myQ.set_qdict(q_dict)
yq_list = list(self.myQ.fixp(self.y_list))
yq_list_goal = [-1.125, -1.0, -0.5, 0, 0.5, 0.875, 1.0, 1.0, 1.125]
self.assertEqual(yq_list, yq_list_goal)
# wrap around behaviour with 'fix' quantization; fractional representation
q_dict = {'WI':5, 'WF':2, 'ovfl':'wrap', 'quant':'fix', 'fx_base': 'dec', 'scale': 8}
self.myQ.set_qdict(q_dict)
yq_list = list(self.myQ.fixp(self.y_list))
yq_list_goal = [-8.75, -8.0, -4.0, 0.0, 4.0, 7.0, 7.75, 8.0, 8.75]
self.assertEqual(yq_list, yq_list_goal)
# return fixpoint numbers as integer (rounding), overflow 'none'
q_dict = {'WI':3, 'WF':0, 'ovfl':'none', 'quant':'round', 'fx_base': 'dec', 'scale': 8}
self.myQ.set_qdict(q_dict)
yq_list = list(self.myQ.fixp(self.y_list))
yq_list_goal = [-9, -8, -4, 0, 4, 7, 8, 8, 9]
self.assertEqual(yq_list, yq_list_goal)
# input list of strings
y_string = ['-1.1', '-1.0', '-0.5', '0', '0.5', '0.9', '0.99', '1.0', '1.1']
yq_list = list(self.myQ.fixp(y_string))
yq_list_goal = [-9, -8, -4, 0, 4, 7, 8, 8, 9]
self.assertEqual(yq_list, yq_list_goal)
# frmt float
q_dict = {'fx_base': 'float'}
self.myQ.set_qdict(q_dict)
yq_list = list(self.myQ.fixp(y_string))
self.assertEqual(yq_list, yq_list_goal)
if __name__=='__main__':
    # Entry point: discover and run all TestCase classes in this module.
    unittest.main()
    # run tests with python -m pyfda.tests.test_pyfda_fir_df
| chipmuenk/pyfda | pyfda/tests/test_fir_df.py | test_fir_df.py | py | 5,413 | python | en | code | 601 | github-code | 13 |
38996510390 | import firebase_admin
from firebase_admin import firestore
firebase_admin.initialize_app()
db = firestore.client()
# Lower-case the `description` field of every document in the `news`
# collection, skipping the special metadata document with id "info".
newsRef = db.collection('news')
for doc in newsRef.stream():
    if doc.id == "info":
        continue
    data = doc.to_dict() or {}
    desc = data.get("description")
    if desc is not None:
        # Bug fixes vs. the original: a Firestore query returns document
        # snapshots (not a dict with .items()), and `ref` was an undefined
        # name (a Realtime-Database leftover) that raised NameError.
        newsRef.document(doc.id).update({"description": desc.lower()})
| artcodefun/test_news | migration/migration.py | migration.py | py | 317 | python | en | code | 0 | github-code | 13 |
34466087904 | import requests
import json
from common.commonData import CommonData
class HttpUtil:
    """Thin wrapper around a requests.Session for JSON POST calls against
    the host configured in CommonData."""
    def __init__(self):
        # One shared session so cookies/keep-alive persist across calls.
        self.http=requests.session()
        self.headers={'Content-Type':'application/json;charset=UTF-8'}
    def post(self,path,data):
        """POST `data` as JSON to host+path; assert HTTP 200 and return the
        decoded response body as a dict."""
        host=CommonData.host # read the globally configured host URL
        data_json=json.dumps(data) # serialize Python object to a JSON string (double-quoted)
        resp=self.http.post(url=host+path,
                            data=data_json,
                            headers=self.headers)
        # Fail fast on any non-200 response.
        assert resp.status_code==200
        resp_json=resp.text
        resp_dict=json.loads(resp_json)
        return resp_dict | lihanhuan/pytest-api | util/httpUtil.py | httpUtil.py | py | 668 | python | en | code | 0 | github-code | 13
6606379293 | """security URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from . import views
# URL routing table for the security project. Order matters: Django matches
# top to bottom. Note that both '' and 'data/' route to views.root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',views.root),
    path('home/',views.home),
    path('search/',views.search),
    path('registration/',views.registration),
    path('login/',views.logins),
    path('logout/',views.logouts),
    path('data/',views.root),
    path('add_relatives/',views.add_relatives),
    path('profile/<int:relation_id>',views.profile),
    path('notifications/',views.notifications),
    path('rem_relatives/',views.rem_relatives),
    # Development tooling (debug toolbar, browser auto-reload).
    path('__debug__/', include('debug_toolbar.urls')),
    path("__reload__/", include("django_browser_reload.urls")),
    # REST API routes live in the `api` app.
    path('api/',include('api.urls')),
]
| nikhil631/Portable-Distress-Security-System | joe/Portable Distress System/security/security/urls.py | urls.py | py | 1,365 | python | en | code | 1 | github-code | 13 |
31328859259 | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from torch.utils.data import DataLoader
from utils.datasets import *
from transformers import BertTokenizer
import nltk
from tqdm.notebook import tqdm
from models.bert import BertForEmbeddings
# Run on GPU when available; models/tensors below are moved to this device.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Token used to pad tokenized sentences up to a fixed length.
pad_symbol = '<pad>'
def get_embedding_weights(X, path, emb_size):
    """Build an embedding weight matrix for the vocabulary of sentences `X`.

    Reads pretrained word vectors (GloVe-style text format: "word v1 v2 ...")
    from `path`; words without a pretrained vector get a random uniform vector.

    :param X: iterable of sentence strings
    :param path: path to the pretrained embedding text file
    :param emb_size: dimensionality of the embedding vectors
    :return: (weights_matrix of shape (vocab, emb_size), word->index dict)
    """
    sentence_tokens = [nltk.word_tokenize(sentence) for sentence in X]
    dictionary = {word for tokens in sentence_tokens for word in tokens}
    dictionary.add(pad_symbol)
    # NOTE(review): iterating a set is non-deterministic across processes, so
    # word2idx order (and therefore saved weight rows) may vary between runs.
    word2idx = {word: index for index, word in enumerate(dictionary)}
    matrix_len = len(dictionary)
    weights_matrix = np.zeros((matrix_len, emb_size))
    embeddings_dict = dict()
    with open(path, 'r', encoding='utf8') as f:
        for line in tqdm(f):
            word, vec = line.split(' ', 1)
            if word in dictionary:
                # NOTE(review): np.fromstring is deprecated; np.fromstring ->
                # np.fromiter/np.array(vec.split()) would be the modern form.
                embeddings_dict[word] = np.fromstring(vec, sep=' ')
    for word, index in tqdm(word2idx.items()):
        try:
            weights_matrix[index] = embeddings_dict[word]
        except KeyError:
            # Out-of-vocabulary word: random init in [0, 1).
            weights_matrix[index] = np.random.uniform(size=emb_size)
    return weights_matrix, word2idx
def sentences_to_idx(X, word2idx, max_len, shuffle):
    """Convert sentences to a (len(X), max_len) array of vocabulary indices.

    Each sentence is tokenized, padded with `pad_symbol` up to `max_len` and
    truncated to exactly `max_len` tokens, so the result is always rectangular.

    :param X: iterable of sentence strings
    :param word2idx: token -> index mapping (must contain every token and `pad_symbol`)
    :param max_len: fixed output length per sentence
    :param shuffle: if True, shuffle word order inside each sentence first
    :return: np.ndarray of shape (len(X), max_len)
    """
    if shuffle:
        X = shuffle_sentences(X)
    sentence_tokens = [nltk.word_tokenize(sentence) for sentence in X]
    for i, tokens in enumerate(sentence_tokens):
        pad_count = max(0, max_len - len(tokens))
        # Bug fix: truncate the *padded sentence* to max_len. The original
        # applied [:max_len] to the padding list only, so sentences longer
        # than max_len kept their full length and produced a ragged array.
        sentence_tokens[i] = (tokens + [pad_symbol] * pad_count)[:max_len]
    return np.array([[word2idx[word] for word in sentence] for sentence in sentence_tokens])
def get_dataloaders(X, y, test_size=0.2, batch_size=32):
    """Split (X, y) into stratified train/validation sets and wrap each in a DataLoader."""
    X_tr, X_va, y_tr, y_va = train_test_split(
        X, y, test_size=test_size, stratify=y, random_state=42)
    train_loader = DataLoader(
        EmotionsDataset(X_tr, y_tr), batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(
        EmotionsDataset(X_va, y_va), batch_size=batch_size)
    return train_loader, val_loader
def preprocess_data(X, y, path, emb_size, max_len, shuffle=False):
    """Full preprocessing pipeline: embedding weights, index matrices and encoded labels."""
    weights, word2idx = get_embedding_weights(X, path, emb_size)
    encoder = LabelEncoder().fit(y)
    indexed = sentences_to_idx(X, word2idx, max_len, shuffle)
    labels = encoder.transform(y)
    return indexed, labels, word2idx, weights
def save_embedding_layer(classification_model, params, path_to_load, path_to_save):
    """Extract the embedding layer from a trained classifier and save its weights.

    :param classification_model: model class to instantiate
    :param params: positional constructor arguments for the model
    :param path_to_load: checkpoint with the full model state_dict
    :param path_to_save: destination for the embedding layer state_dict
    """
    model = classification_model(*params).to(device)
    model.load_state_dict(torch.load(path_to_load))
    model.eval()
    modules = [module for module in model.modules()]
    # modules[0] is the model itself; assumes the embedding layer is the
    # first registered submodule — TODO confirm for each model class used.
    torch.save(modules[1].state_dict(), path_to_save)
    print('Saved!')
def get_embeddings(X, embedding_model, params, path):
    """Compute a sentence embedding for every index sequence in `X`.

    :param X: iterable of token-index sequences (as produced by sentences_to_idx)
    :param embedding_model: model class producing one embedding per sentence
    :param params: positional constructor arguments for the model
    :param path: checkpoint with the embedding model's state_dict
    :return: np.ndarray of sentence embeddings, one row per sentence
    """
    model = embedding_model(*params).to(device)
    model.load_state_dict(torch.load(path))
    model.eval()
    sentence_embeddings = []
    # One sentence per forward pass (batch dimension added via unsqueeze).
    for sentence in tqdm(X):
        sentence_embeddings.append(
            model(torch.tensor(sentence).long().unsqueeze(0).to(device)).squeeze(0).detach().cpu().numpy())
    sentence_embeddings = np.array(sentence_embeddings)
    return sentence_embeddings
def save_embeddings(embeddings, path):
    """Persist a 2-D embedding matrix to CSV with columns emb_dim_1..emb_dim_D."""
    header = ['emb_dim_{}'.format(dim + 1) for dim in range(embeddings.shape[1])]
    frame = pd.DataFrame(embeddings, columns=header)
    frame.to_csv(path, index=False)
    print('Saved!')
def shuffle_sentence(sentence):
    """Return the tokens of `sentence` in a random order different from the original.

    Keeps drawing permutations until one differs from the identity, so the
    output is guaranteed to actually be reordered (when n >= 2). Inputs with
    fewer than two tokens are returned unchanged.

    :param sentence: sequence of tokens
    :return: np.ndarray containing the same tokens, reordered
    """
    n = len(sentence)
    if n < 2:
        # Only the identity permutation exists for these inputs.
        return np.array(sentence)
    while True:
        permutation = np.random.permutation(n)
        # Bug fix: the original condition `(... ).all() == 0` accepted exactly
        # the permutations with at least one fixed point — including the
        # identity — so "shuffled" sentences could come back unchanged.
        # Accept only permutations that moved at least one position.
        if (permutation != np.arange(n)).any():
            return np.array(sentence)[permutation]
def shuffle_sentences(sentences):
    """Return a copy of `sentences` with the word order inside each sentence shuffled."""
    return [' '.join(shuffle_sentence(text.split(' '))) for text in tqdm(sentences)]
def get_bert_dataloaders(X, y, test_size=0.2, batch_size=16, shuffle=False):
    """Tokenize raw sentences with BERT and build train/validation DataLoaders.

    :param X: pandas Series of sentence strings (``.values`` is taken below)
    :param y: labels, encoded with a freshly fitted LabelEncoder
    :param test_size: validation fraction for the stratified split
    :param batch_size: DataLoader batch size
    :param shuffle: if True, additionally return a loader over word-shuffled
        validation sentences (for word-order ablation experiments)
    :return: (train_loader, val_loader) or
        (train_loader, val_loader, val_loader_shuffled) when shuffle=True
    """
    label_encoder = LabelEncoder().fit(y)
    X, y = X.values, label_encoder.transform(y)
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=test_size, shuffle=True, random_state=42, stratify=y)
    if shuffle:
        # Shuffle word order inside each validation sentence only.
        sentences = [sentence for sentence in X_val]
        shuffled_sentences = shuffle_sentences(sentences)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
    tokenized_train_text = tokenizer(list(X_train), truncation=True, padding=True)
    tokenized_val_text = tokenizer(list(X_val), truncation=True, padding=True)
    train_dataset = BertDataset(tokenized_train_text, y_train)
    val_dataset = BertDataset(tokenized_val_text, y_val)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_dataloader = DataLoader(val_dataset, batch_size=batch_size)
    if shuffle:
        tokenized_val_text_shuffled = tokenizer(list(shuffled_sentences), truncation=True, padding=True)
        val_dataloader_shuffled = DataLoader(BertDataset(tokenized_val_text_shuffled, y_val), batch_size=batch_size)
    if not shuffle:
        return train_dataloader, val_dataloader
    else:
        return train_dataloader, val_dataloader, val_dataloader_shuffled
def get_bert_embeddings(X, path):
    """Compute one BERT-based embedding per sentence in `X`.

    :param X: iterable of raw sentence strings
    :param path: checkpoint with a BertForEmbeddings state_dict
    :return: np.ndarray of sentence embeddings, one row per sentence
    """
    model = BertForEmbeddings().to(device)
    model.load_state_dict(torch.load(path))
    model.eval()
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
    tokenized_text = tokenizer(list(X), truncation=True, padding=True)
    sentence_embeddings = []
    # One sentence per forward pass; batch dim added with unsqueeze(0).
    for input_id, attention_mask in tqdm(zip(tokenized_text['input_ids'], tokenized_text['attention_mask'])):
        input_id, attention_mask = torch.tensor(input_id).to(device), torch.tensor(attention_mask).to(device)
        sentence_embeddings.append(
            model(input_id.unsqueeze(0), attention_mask.unsqueeze(0)).squeeze(0).detach().cpu().numpy())
    sentence_embeddings = np.array(sentence_embeddings)
    return sentence_embeddings
def get_embeddings_dataloaders(path, y, batch_size=32):
    """Load precomputed sentence embeddings from CSV and build train/val DataLoaders."""
    features = pd.read_csv(path).values
    X_tr, X_va, y_tr, y_va = train_test_split(
        features, y, test_size=0.2, stratify=y, random_state=42)
    train_loader = DataLoader(
        ClassificationDataset(X_tr, y_tr), batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(
        ClassificationDataset(X_va, y_va), batch_size=batch_size)
    return train_loader, val_loader
| mymiptshame/nlp_NMA | utils/utils.py | utils.py | py | 6,714 | python | en | code | 0 | github-code | 13 |
26212296155 | import os
import math
import sys
import argparse
import youtube_dl
# youtube-dl format selector used when computing filesizes.
BEST_FORMAT = "bestvideo+bestaudio/best"
# Command-line interface: URL plus optional download/verbose flags.
# NOTE(review): `type=bool` with nargs='?' relies on const/default for the
# flag behaviour; any explicit value string would be truthy.
PARSER = argparse.ArgumentParser(description="Youtube Video Downloader")
PARSER.add_argument(
    '--Url',
    '-u',
    type=str,
    help='YouTube video or playlist url')
PARSER.add_argument(
    "--Download",
    '-d',
    type=bool,
    nargs='?',
    const=True, default=False,
)
PARSER.add_argument(
    "--Verbose",
    '-v',
    type=bool,
    nargs='?',
    const=True, default=False
)
class ResourceNotFoundError(Exception):
    """Raised when the given URL cannot be resolved to a video or playlist."""
class NoFilesizeError(Exception):
    """Raised when no filesize can be determined for a video."""
class TotalSize:
    """Compute the total download size of a YouTube video or playlist.

    Uses youtube-dl metadata only (no downloads). Videos whose filesize is
    not reported are listed on stderr and excluded from the total.
    """
    def __init__(self, url):
        """Resolve `url` into a list of videos; raises ResourceNotFoundError
        when the URL cannot be resolved."""
        self._ydl = youtube_dl.YoutubeDL({"quiet": True, "no_warnings": True})
        self._selector = self._ydl.build_format_selector(BEST_FORMAT)
        try:
            preinfo = self._ydl.extract_info(url, process=False)
        except youtube_dl.utils.DownloadError:
            raise ResourceNotFoundError
        # Playlists expose their videos under 'entries'; single videos don't.
        if 'entries' in preinfo:
            self._videos = list(preinfo['entries'])
        else:
            self._videos = [preinfo]
        self.number_of_videos = len(self._videos)
    def _get_size(self, info):
        """Return the filesize in bytes of the best format for one video;
        raises NoFilesizeError when it cannot be determined."""
        try:
            video = self._ydl.process_ie_result(info, download=False)
        except youtube_dl.utils.DownloadError:
            raise NoFilesizeError
        try:
            best = next(self._selector(video))
        except KeyError:
            best = video
        try:
            # Merged audio+video formats report per-stream sizes.
            if 'requested_formats' in best:
                size = sum(int(f['filesize'])
                           for f in best['requested_formats'])
            else:
                size = int(best['filesize'])
        except (TypeError, KeyError):
            # filesize missing or None.
            raise NoFilesizeError
        return size
    def _readable_size(self, size_bytes):
        """Format a byte count as a human-readable string, e.g. '1.5 MB'."""
        if size_bytes == 0:
            return "0B"
        size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
        i = int(math.floor(math.log(size_bytes, 1024)))
        p = math.pow(1024, i)
        s = round(size_bytes / p, 2)
        return "%s %s" % (s, size_name[i])
    def get_totalsize(self):
        """Print the size of each video and return the human-readable total."""
        totalsize = 0
        for video in self._videos:
            try:
                size = self._get_size(video)
            except NoFilesizeError:
                print('Filesize of "%s" is unavailable.' %
                      video['title'], file=sys.stderr)
            else:
                print('"%s": %s' % (video['title'], self._readable_size(size)))
                totalsize += size
        return self._readable_size(totalsize)
def main(url, get_lenght, download):
    """Optionally report the total size of `url`, optionally download it, then exit.

    NOTE(review): `get_lenght` is a typo for `get_length`; kept because the
    call site below uses the same name positionally.
    """
    if get_lenght:
        total = TotalSize(url)
        print('Total size of all videos with reported filesize: ' + total.get_totalsize())
        print('Total number of videos: %s' % total.number_of_videos)
    if download:
        # Delegates the actual download to the youtube-dl CLI.
        os.system("youtube-dl -f best "+url)
    print("Exiting")
    sys.exit()
if __name__ == '__main__':
    # Parse CLI flags and run; translate known failures into exit messages.
    args = PARSER.parse_args()
    try:
        get_lenght = args.Verbose
        download = args.Download
        url = args.Url
        main(url, get_lenght, download)
    except IndexError:
        sys.exit('Please supply an url.')
    except ResourceNotFoundError:
        sys.exit('Resource not found.')
| n1xsoph1c/customTools | youtube_downloader.py | youtube_downloader.py | py | 3,282 | python | en | code | 0 | github-code | 13 |
35013048210 | import numpy as np
import matplotlib.pyplot as plt
import cv2
import scipy
from matplotlib import pyplot as plt
#Add imports if needed:
from scipy import interpolate
import time
#end imports
#Add extra functions here:
def creatPanom(HpanoList, outsize, filepath='sintra/sintra'):
    """Stitch five pyramid-downsampled images into one panorama.

    Warps image 1 onto 2, the result onto 3, that onto 4, then warps image 5
    onto the accumulated panorama; finally converts the black background to
    white and writes "<filepath>_pano.png".

    NOTE(review): HpanoList[3] is never used (indices jump from [2] to [4]) —
    presumably because image 4 is the reference frame; confirm against
    creatHtoImg4, which appends a placeholder 0 for the target image.
    """
    """1to2"""
    img = cv2.imread(filepath + '1_pyr.png')
    imgB = cv2.imread(filepath + '2_pyr.png')
    warpImg1to2 = warpH(img, HpanoList[0], outsize, LAB_space=False, kind='linear')
    pano12 = imageStitching(imgB, warpImg1to2)
    """12to3"""
    img = pano12
    imgB = cv2.imread(filepath + '3_pyr.png')
    warpImg12to3 = warpH(img, HpanoList[1], outsize, LAB_space=False, kind='linear')
    pano123 = imageStitching(imgB, warpImg12to3)
    """123to4"""
    imgB = cv2.imread(filepath + '4_pyr.png')
    img = pano123
    warpImg123to4 = warpH(img, HpanoList[2], outsize, LAB_space=False, kind='linear')
    pano1234 = imageStitching(imgB,warpImg123to4)
    """5to1234"""
    img = cv2.imread(filepath + '5_pyr.png')
    imgB = pano1234
    warpImg5to1234 = warpH(img, HpanoList[4], outsize, LAB_space=False, kind='linear')
    pano12345 = imageStitching(imgB,warpImg5to1234)
    pano12345 = whiteBackground(pano12345)
    cv2.imwrite(filepath + "_pano.png", pano12345)
    return pano12345
def creatHtoImg4(index=[1,2,3,4,5], target=4, N=6, SiftTreshhold=0.3,
                 filepath='sintra/sintra',manual=False, RANSAC=False, nIter=1000, tol=1):
    """Compute one homography per image mapping it toward its neighbour closer
    to the `target` image (i -> i+1 left of target, i -> i-1 right of it).

    A placeholder 0 is appended for the target image itself. Correspondences
    come from SIFT matching or manual point picking; the homography is
    estimated with RANSAC or plain DLT.

    NOTE(review): mutable default argument `index=[1,2,3,4,5]` — harmless
    here since it is never mutated, but a tuple would be safer.
    """
    HpanoList = []
    for i in index:
        if (i == target):
            HpanoList.append(0)
        if (i<target):
            # Warp image i onto image i+1 (toward the target).
            filepath_projection = filepath + str(i) + '_pyr.png'
            filepath_base = filepath + str(i+1) + '_pyr.png'
            img = cv2.imread(filepath_projection)
            imgB = cv2.imread(filepath_base)
            if (manual):
                p1, p2 = getPoints(cv2.cvtColor(imgB, cv2.COLOR_RGB2GRAY), cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), 5)
            else:
                p1, p2 = getPoints_SIFT(imgB, img, N=N, treshhold=SiftTreshhold)
            if(RANSAC):
                H_sift_sintra = ransacH(p1, p2, nIter=nIter, tol=tol)
            else:
                H_sift_sintra = computeH(p1, p2)
            HpanoList.append(H_sift_sintra)
        if (i>target):
            # Warp image i onto image i-1 (toward the target).
            filepath_projection = filepath + str(i) + '_pyr.png'
            filepath_base = filepath + str(i-1) + '_pyr.png'
            img = cv2.imread(filepath_projection)
            imgB = cv2.imread(filepath_base)
            if (manual):
                p1, p2 = getPoints(cv2.cvtColor(imgB, cv2.COLOR_RGB2GRAY), cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), 5)
            else:
                p1, p2 = getPoints_SIFT(imgB, img, N=N, treshhold=SiftTreshhold)
            if (RANSAC):
                H_sift_sintra = ransacH(p1, p2, nIter=nIter, tol=tol)
            else:
                H_sift_sintra = computeH(p1, p2)
            HpanoList.append(H_sift_sintra)
    return HpanoList
def whiteBackground(img):
    """Replace the black background of a stitched panorama with white.

    A pixel counts as background when its first channel is 0 — the same
    mask convention used by imageStitching.

    :param img: HxWx3 uint8 image
    :return: new HxWx3 uint8 image; background pixels become (255, 255, 255)
    """
    white = np.full((img.shape[0], img.shape[1], 3), 255, np.uint8)
    # Vectorized masked copy instead of the original O(H*W) Python loop.
    mask = img[:, :, 0] > 0
    white[mask] = img[mask]
    return white
def pyrDownImages(pyrDownIter=2, filepath='sintra/sintra'):
    """Downsample the five source JPGs `pyrDownIter` times with cv2.pyrDown
    and save them as '<filepath>N_pyr.png'.

    Note: image 4 is deliberately written as '4_pyr_real.png' — the caller
    later writes a translated version of it as '4_pyr.png'.
    """
    img1 = cv2.imread(filepath + '1.JPG')
    img2 = cv2.imread(filepath + '2.JPG')
    img3 = cv2.imread(filepath + '3.JPG')
    img4 = cv2.imread(filepath + '4.JPG')
    img5 = cv2.imread(filepath + '5.JPG')
    # Each pyrDown halves both image dimensions.
    while (pyrDownIter > 0):
        pyrDownIter = pyrDownIter - 1
        img1 = cv2.pyrDown(img1)
        img2 = cv2.pyrDown(img2)
        img3 = cv2.pyrDown(img3)
        img4 = cv2.pyrDown(img4)
        img5 = cv2.pyrDown(img5)
    cv2.imwrite(filepath + '1_pyr.png', img1)
    cv2.imwrite(filepath + '2_pyr.png', img2)
    cv2.imwrite(filepath + '3_pyr.png', img3)
    cv2.imwrite(filepath + '4_pyr_real.png', img4)
    cv2.imwrite(filepath + '5_pyr.png', img5)
def creatHtoImgPano(index=[1,2,3,4,5], target=4, filepath='sintra/sintra'):
    """Compute neighbour-to-neighbour homographies and chain them so every
    image maps directly into the target (image 4) frame.

    Returns [H1->?, H2->3, 0 (target placeholder), H4->3?, chained H for 5];
    the chaining below assumes exactly 5 images with target=4 — the Hlist
    indices are hard-coded. NOTE(review): mutable default `index` is never
    mutated, but a tuple would be safer.
    """
    Hlist = []
    HpanoList = []
    for i in index:
        if (i<target):
            filepath_projection = filepath + str(i) + '_pyr.png'
            filepath_base = filepath + str(i+1) + '_pyr.png'
            img = cv2.imread(filepath_projection)
            imgB = cv2.imread(filepath_base)
            p1, p2 = getPoints_SIFT(imgB, img, 15, treshhold=0.15)
            H_sift_sintra = computeH(p1, p2)
            Hlist.append(H_sift_sintra)
        if (i>target):
            filepath_projection = filepath + str(i) + '_pyr.png'
            filepath_base = filepath + str(i-1) + '_pyr.png'
            img = cv2.imread(filepath_projection)
            imgB = cv2.imread(filepath_base)
            p1, p2 = getPoints_SIFT(imgB, img, 15, treshhold=0.15)
            H_sift_sintra = computeH(p1, p2)
            Hlist.append(H_sift_sintra)
    # Chain pairwise homographies into direct-to-target transforms.
    HpanoList.append(np.matmul(Hlist[0], Hlist[1]))
    HpanoList.append(Hlist[1])
    HpanoList.append(0)
    HpanoList.append(Hlist[2])
    HpanoList.append(Hlist[3] @ Hlist[2])
    return HpanoList
def baseImageTranslation(img, outsize, shiftX, shiftY):
    """Place `img` on a black canvas of size `outsize`, offset by (shiftX, shiftY).

    :param img: HxWx3 uint8 source image
    :param outsize: (height, width) of the output canvas
    :param shiftX: horizontal offset of the image inside the canvas
    :param shiftY: vertical offset of the image inside the canvas
    :return: uint8 canvas with the translated image; uncovered pixels stay 0
    """
    canvas = np.zeros((outsize[0], outsize[1], 3), np.uint8)
    # One vectorized slice assignment instead of the original per-pixel loop;
    # also clips the copy region instead of raising IndexError when the
    # shifted image does not fully fit inside the canvas.
    h = min(img.shape[0], outsize[0] - shiftY)
    w = min(img.shape[1], outsize[1] - shiftX)
    if h > 0 and w > 0:
        canvas[shiftY:shiftY + h, shiftX:shiftX + w] = img[:h, :w]
    return canvas
#Extra functions end
# HW functions:
def getPoints(im1,im2,N):
    """Interactively collect N corresponding points from two grayscale images.

    Shows each image in turn and records N mouse clicks per image via
    plt.ginput (no timeout).

    :param im1: first grayscale image
    :param im2: second grayscale image
    :param N: number of point pairs to collect
    :return: (p1, p2), each a 2xN array of (x, y) click coordinates
    """
    plt.imshow(cv2.cvtColor(im1, cv2.COLOR_GRAY2RGB))
    p1 = np.array(plt.ginput(N,0)).T
    plt.close()
    plt.imshow(cv2.cvtColor(im2, cv2.COLOR_GRAY2RGB))
    p2 = np.array(plt.ginput(N,0)).T
    plt.close()
    return p1,p2
def computeH(p1, p2):
    """Estimate the projective homography H mapping points p2 onto points p1 (DLT).

    Each correspondence contributes two rows to the linear system A h = 0;
    the solution is the eigenvector of A^T A with the smallest eigenvalue.

    :param p1: 2xN array of target points
    :param p2: 2xN array of source points (N >= 4, no degenerate configuration)
    :return: 3x3 homography matrix, defined up to scale
    """
    assert (p1.shape[1] == p2.shape[1])
    assert (p1.shape[0] == 2)
    n = p2.shape[1]
    A = np.zeros((2 * n, 9))
    for j in range(n):
        x, y = p2[0, j], p2[1, j]
        u, v = p1[0, j], p1[1, j]
        A[2 * j] = [x, y, 1, 0, 0, 0, -x * u, -y * u, -u]
        A[2 * j + 1] = [0, 0, 0, x, y, 1, -x * v, -y * v, -v]
    # Bug fix: np.linalg.eig returns eigenvalues in no particular order, so
    # taking V[:, -1] was not guaranteed to pick the null-space direction.
    # eigh (valid here since A^T A is symmetric) sorts eigenvalues ascending,
    # making column 0 the smallest-eigenvalue eigenvector.
    D, V = np.linalg.eigh(A.T @ A)
    H2to1 = V[:, 0].reshape((3, 3))
    return H2to1
def computeAffineH(p1, p2):
    """Estimate the affine transform mapping points p2 onto points p1 (least squares).

    :param p1: 2xN array of target points
    :param p2: 2xN array of source points (N >= 3, not all collinear)
    :return: 3x3 affine homography whose last row is [0, 0, 1]
    """
    assert (p1.shape[1] == p2.shape[1])
    assert (p1.shape[0] == 2)
    n = p2.shape[1]
    A = np.zeros((2 * n, 6))
    b = np.zeros(2 * n)
    for j in range(n):
        x, y = p2[0, j], p2[1, j]
        A[2 * j, :3] = [x, y, 1]
        A[2 * j + 1, 3:] = [x, y, 1]
        b[2 * j] = p1[0, j]
        b[2 * j + 1] = p1[1, j]
    # Solve with lstsq instead of explicitly inverting A^T A: numerically
    # stabler and well-defined even for (near-)rank-deficient inputs.
    params, *_ = np.linalg.lstsq(A, b, rcond=None)
    affinH2to1 = params.reshape((2, 3))
    return np.vstack([affinH2to1, [0.0, 0.0, 1.0]])
def warpH(im2, H2to1, outsize, LAB_space=False, kind='linear'):
    """Warp `im2` into a canvas of size `outsize` using homography H2to1.

    Uses inverse mapping: for every output pixel, the source location is
    H2to1^-1 @ [x, y, 1], and each channel is sampled with a scipy interp2d
    interpolator of the given `kind`. Out-of-bounds pixels are set to 0.

    :param im2: HxWx3 uint8 source image
    :param H2to1: 3x3 homography mapping source coordinates to output coordinates
    :param outsize: (height, width) of the output image
    :param LAB_space: interpolate in LAB instead of the input colour space
    :param kind: interp2d kind ('linear', 'cubic', ...)
    :return: warped uint8 image of shape (outsize[0], outsize[1], 3)
    """
    if (LAB_space):
        # NOTE(review): uses COLOR_RGB2LAB although cv2.imread yields BGR —
        # confirm the intended channel order at the call sites.
        im2 = cv2.cvtColor(im2, cv2.COLOR_RGB2LAB)
    l_channel, a_channel, b_channel = cv2.split(im2)
    x_im2 = np.arange(l_channel.shape[1])
    y_im2 = np.arange(l_channel.shape[0])
    # Enumerate every output pixel as homogeneous coordinates [x, y, 1].
    x = []
    y = []
    z = []
    for i in range(outsize[0]):
        for j in range(outsize[1]):
            x.append(j)
            y.append(i)
            z.append(1)
    p_old = np.array([x, y, z])
    # Inverse warp: map output pixels back into source coordinates.
    p_new_temp = np.linalg.inv(H2to1) @ p_old
    p_new = np.array([p_new_temp[0, :] / p_new_temp[2, :], p_new_temp[1, :] / p_new_temp[2, :]])
    # One interpolator per colour channel.
    f_l = interpolate.interp2d(x_im2, y_im2, l_channel, kind=kind)
    f_a = interpolate.interp2d(x_im2, y_im2, a_channel, kind=kind)
    f_b = interpolate.interp2d(x_im2, y_im2, b_channel, kind=kind)
    znew_l = []
    znew_a = []
    znew_b = []
    for i in range(p_old.shape[1]):
        # Sample only when the back-projected point falls inside the source.
        if (p_new[0, i] > 0 and p_new[1, i] > 0 and p_new[0, i] < im2.shape[1] and p_new[1, i] < im2.shape[0]):
            znew_l_temp = np.round((f_l(p_new[0, i], p_new[1, i])))
            znew_l_temp = (znew_l_temp[0]).astype('uint8')
            znew_a_temp = np.round((f_a(p_new[0, i], p_new[1, i])))
            znew_a_temp = (znew_a_temp[0]).astype('uint8')
            znew_b_temp = np.round((f_b(p_new[0, i], p_new[1, i])))
            znew_b_temp = (znew_b_temp[0]).astype('uint8')
        else:
            znew_l_temp = 0
            znew_a_temp = 0
            znew_b_temp = 0
        znew_l.append(znew_l_temp)
        znew_a.append(znew_a_temp)
        znew_b.append(znew_b_temp)
    znew_l = (np.array(znew_l).reshape((outsize[0], outsize[1]))).astype('uint8')
    znew_a = (np.array(znew_a).reshape((outsize[0], outsize[1]))).astype('uint8')
    znew_b = (np.array(znew_b).reshape((outsize[0], outsize[1]))).astype('uint8')
    warp_im2 = np.stack([znew_l, znew_a, znew_b], 2)
    if (LAB_space):
        warp_im2 = cv2.cvtColor(warp_im2, cv2.COLOR_LAB2RGB)
    return warp_im2
def imageStitching(img1, wrap_img2):
    """Overlay a warped image onto a base image to form a panorama.

    The canvas takes the size of `wrap_img2`; `img1` is copied into the
    top-left corner, then every pixel where `wrap_img2`'s first channel is
    non-zero is overwritten by the warped image.

    :param img1: base image (may be smaller than the canvas)
    :param wrap_img2: warped image that defines the output size
    :return: stitched uint8 panorama of wrap_img2's shape
    """
    panoImg = np.zeros(wrap_img2.shape, np.uint8)
    # Vectorized replacement of the original per-pixel Python loops.
    h = min(img1.shape[0], panoImg.shape[0])
    w = min(img1.shape[1], panoImg.shape[1])
    panoImg[:h, :w] = img1[:h, :w]
    mask = wrap_img2[:, :, 0] > 0
    panoImg[mask] = wrap_img2[mask]
    return panoImg
def ransacH(p1, p2, nIter, tol):
    """Robustly estimate the homography mapping p2 onto p1 via OpenCV's RANSAC.

    :param p1: 2xN array of target points
    :param p2: 2xN array of source points
    :param nIter: maximum RANSAC iterations
    :param tol: reprojection-error threshold for inliers (pixels)
    :return: 3x3 homography matrix
    """
    dst = np.stack([p1[0], p1[1]], axis=1)
    src = np.stack([p2[0], p2[1]], axis=1)
    H, _mask = cv2.findHomography(
        src, dst, method=cv2.RANSAC, ransacReprojThreshold=tol, maxIters=nIter)
    return H
def getPoints_SIFT(img1,img2,N=6, treshhold=0.3):
    """Find up to N corresponding points between two images with SIFT + FLANN.

    Matches descriptors with a kNN (k=2) search and keeps matches passing
    Lowe's ratio test with the given threshold, stopping after N matches.

    :param img1: first image
    :param img2: second image
    :param N: maximum number of correspondences to return
    :param treshhold: Lowe ratio threshold (smaller = stricter)
    :return: (p1, p2), each a 2xM array of matched (x, y) coordinates, M <= N
    """
    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=100) # or pass empty dictionary
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    matchesMask = [[0, 0] for i in range(len(matches))]
    p1_x = []
    p1_y = []
    p2_x = []
    p2_y = []
    p1 = []
    p2 = []
    # ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < treshhold * n.distance and N > 0:
            N = N - 1
            matchesMask[i] = [1, 0]
            pt1 = kp1[m.queryIdx].pt
            pt2 = kp2[m.trainIdx].pt
            matches_points_1_x = pt1[0]
            p1_x.append(matches_points_1_x)
            matches_points_1_y = pt1[1]
            p1_y.append(matches_points_1_y)
            matches_points_2_x = pt2[0]
            p2_x.append(matches_points_2_x)
            matches_points_2_y = pt2[1]
            p2_y.append(matches_points_2_y)
    p1 = np.array([p1_x, p1_y])
    p2 = np.array([p2_x, p2_y])
    return p1,p2
if __name__ == '__main__':
    print('my_homography')
    # Sections 2.1-2.4: manual point picking, homography estimation, warping
    # and stitching of the incline image pair.
    # # """2.1"""
    im1 = cv2.imread('incline/incline_L_pyr.png',0)
    im2 = cv2.imread('incline/incline_R_pyr.png',0)
    p1, p2 = getPoints(im1, im2, 3)
    print(f"\np1:\n {p1}\n")
    np.save("incline/p2.npy", p2)
    # # """2.2"""
    H2to1 = computeH(p1, p2)
    print(f"\nH2to1:\n {H2to1}\n")
    np.save("incline/H2to1_pyr.npy", H2to1)
    # """2.3"""
    H2to1 = np.load("incline/H2to1_pyr.npy")
    im2 = cv2.imread('incline/incline_R_pyr.png')
    outsize = np.array([350, 900])
    warp_im2 = warpH(im2, H2to1, outsize, LAB_space=False, kind='cubic')
    cv2.imwrite('incline/warp_im2_cubic.png', warp_im2)
    # """2.4"""
    img1 = cv2.imread('incline/incline_L_pyr.png')
    warp_im2 = cv2.imread('incline/warp_im2.png')
    panoImg = imageStitching(img1, warp_im2)
    cv2.imwrite('incline/panoImg.png', panoImg)
    cv2.imshow("incline/panoImg", panoImg)
    cv2.waitKey(0)
"""2.5"""
img1 = cv2.imread('incline/incline_L_pyr.png')
img2 = cv2.imread('incline/incline_R_pyr.png')
p1, p2 = getPoints_SIFT(img1,img2, N=7)
print("points")
# H2to1_sift = computeH(p1, p2)
print(f"H2to1_sift:\n {H2to1_sift}\n")
print("H2to1_sift")
outsize = np.array([350, 900])
warp_im2_sift = warpH(img2, H2to1_sift, outsize, LAB_space=False, kind='cubic')
cv2.imwrite('incline/warp_im2_cubic_sift.png', warp_im2_sift)
print("warp_im2_sift")
panoImg_sift = imageStitching(img1, warp_im2_sift)
cv2.imwrite('incline/panoImg_sift.png', panoImg_sift)
cv2.imshow("incline/panoImg_sift", panoImg_sift)
cv2.waitKey(0)
"""2.7"""
"""stiching - SIFT & manual with and without RANSAC for all images"""
start = time.time()
filepath = 'sintra/sintra'
pyrDownIter = 3
pyrDownImages(pyrDownIter=pyrDownIter, filepath=filepath)
img4 = cv2.imread(filepath + '4_pyr_real.png')
## hyperParameters for sintra
if (filepath=='sintra/sintra'):
if (pyrDownIter == 0):
outsize = np.array([4000, 14400])
N = 600
SiftTreshhold = 0.12
if (pyrDownIter == 1):
outsize = np.array([2000, 7200])
N = 600
SiftTreshhold = 0.13
if (pyrDownIter == 2):
outsize = np.array([1000, 3600])
N = 300
SiftTreshhold = 0.14
if (pyrDownIter == 3):
outsize = np.array([600, 1800])
N = 35
SiftTreshhold = 0.15
img4 = baseImageTranslation(img4, outsize,
shiftX=(outsize[0] // 2)-(outsize[0] // 9),
shiftY=(outsize[1] // 7)-(outsize[1] // 17))
## hyperParameters for beach
if (filepath=='beach/beach'):
if (pyrDownIter == 2):
outsize = np.array([2500, 1200])
N = 15
SiftTreshhold = 0.3
if (pyrDownIter == 1):
outsize = np.array([5000, 2400])
N = 50
SiftTreshhold = 0.25
if (pyrDownIter == 0):
outsize = np.array([10000, 4800])
N = 120
SiftTreshhold = 0.14
img4 = baseImageTranslation(img4, outsize,
shiftX=outsize[1] // 5,
shiftY=outsize[0] // 10)
## hyperParameters for haifa
if (filepath=='haifa/haifa'):
if (pyrDownIter == 3):
SiftTreshhold = 0.15
N =100
outsize = np.array([900, 2500]) ## beach 2 pyr
if (pyrDownIter == 2):
SiftTreshhold = 0.15
N = 100
outsize = np.array([1800, 5000]) ## beach 2 pyr
if (pyrDownIter == 1):
SiftTreshhold = 0.15
N = 200
outsize = np.array([3600, 10000]) ## beach 2 pyr
if (pyrDownIter == 0):
SiftTreshhold = 0.14
N = 400
outsize = np.array([7200, 20000]) ## beach 2 pyr
img4 = baseImageTranslation(img4, outsize,
shiftX=outsize[1] // 7 - 100,
shiftY=outsize[0] // 6)
cv2.imwrite(filepath + '4_pyr.png', img4)
HpanoList = creatHtoImg4(index=[1,2,3,4,5], target=4, N=N,
SiftTreshhold=SiftTreshhold, filepath=filepath,
manual=False, RANSAC=True, nIter=1000, tol=1)
pano12345 = creatPanom(HpanoList, outsize, filepath=filepath)
end = time.time()
print(f"\nRun Time:\n {end-start}\n")
cv2.imshow(filepath + "_pano.png", pano12345)
"""2.11"""
"""affine VS projection"""
"""GOOD RESULTS"""
img1 = cv2.imread('bay/bay1.jpg')
img2 = cv2.imread('bay/bay2.jpg')
outsize = np.array([700, 1500])
img1 = baseImageTranslation(img1, outsize,
shiftX=0,
shiftY=outsize[0] // 10)
p1, p2 = getPoints_SIFT(img1,img2, N=30, treshhold=0.15)
print("points")
H2to1_affine = computeAffineH(p1, p2)
print(f"H2to1_sift:\n {H2to1_affine}\n")
warp_im2_affine = warpH(img2, H2to1_affine, outsize, LAB_space=False, kind='linear')
panoImg_affine = imageStitching(img1, warp_im2_affine)
cv2.imwrite('bay/bay_affine.png', panoImg_affine)
H2to1_projective = computeH(p1, p2)
print(f"H2to1_sift:\n {H2to1_projective}\n")
warp_im2_projective = warpH(img2, H2to1_projective, outsize, LAB_space=False, kind='linear')
panoImg_projective = imageStitching(img1, warp_im2_projective)
cv2.imwrite('bay/bay_projective.png', panoImg_projective)
"""BAD RESULTS"""
img1 = cv2.imread('sintra/sintra2_pyr.png')
img2 = cv2.imread('sintra/sintra1_pyr.png')
outsize = np.array([1300, 2000])
img1 = baseImageTranslation(img1, outsize,
shiftX=0,
shiftY=outsize[0] // 10)
p1, p2 = getPoints_SIFT(img1,img2, N=30, treshhold=0.15)
print("points")
H2to1_affine = computeAffineH(p1, p2)
print(f"H2to1_sift:\n {H2to1_affine}\n")
warp_im2_affine = warpH(img2, H2to1_affine, outsize, LAB_space=False, kind='linear')
panoImg_affine = imageStitching(img1, warp_im2_affine)
cv2.imwrite('sintra/sintra_affine.png', panoImg_affine)
H2to1_projective = computeH(p1, p2)
print(f"H2to1_sift:\n {H2to1_projective}\n")
warp_im2_projective = warpH(img2, H2to1_projective, outsize, LAB_space=False, kind='linear')
panoImg_projective = imageStitching(img1, warp_im2_projective)
cv2.imwrite('sintra/sintra_projective.png', panoImg_projective) | shalip91/Homography | my_homography.py | my_homography.py | py | 17,427 | python | en | code | 0 | github-code | 13 |
7317626054 | from langchain.llms import LlamaCpp
from langchain.prompts import PromptTemplate
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
import paho.mqtt.client as mqtt
from dotenv import load_dotenv
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from langchain.pydantic_v1 import BaseModel, Field
import pyttsx3
from pywhispercpp.examples.assistant import Assistant
from queue import Queue
from threading import Thread, Lock
import time
# MQTT broker and credentials for the ESP32 smart-home bridge.
MQTT_SERVER = "mqtt.eclipseprojects.io"
CLIENTID = "esp32-dht22-clientId-cdf7"
PASSWORD = ""
# Command topics published to the device, and sensor topics subscribed to.
SUBTOPIC_LED = "esp32-dht22/LED"
SUBTOPIC_DOOR = "esp32-dht22/DOOR"
SUBTOPIC_TEMP = "esp32-dht22/Temp"
SUBTOPIC_HUMIDITY = "esp32-dht22/Humidity"
# Local path to the quantized Mistral model used by LlamaCpp.
MODEL = r"../models/mistral-7b-instruct-v0.1.Q8_0.gguf"
# Define your desired data structure.
# Define your desired data structure.
class State(BaseModel):
    """Structured LLM output: the desired house state plus a spoken reply."""
    light: int = Field(description="1 for on, 0 for off", ge=0, le=1)
    # door: int = Field(description="1 for open, 0 for closed", ge=0, le=1)
    msg: str = Field(description="Response to the user's commands")
def on_message(client, userdata, msg):
    """Paho MQTT callback: log every message arriving on a subscribed topic."""
    decoded = msg.payload.decode("utf-8")
    print(f"Received message from topic {msg.topic}: {decoded}")
# Module-level MQTT client shared by the whole program.
client = mqtt.Client()
client.username_pw_set(CLIENTID, PASSWORD)
# Subscribe to the topics for temperature and humidity
# NOTE(review): subscribing before connect() works with paho only for the
# initial session and is lost on reconnect; moving these into an on_connect
# callback would be more robust — confirm intended behaviour.
client.subscribe(SUBTOPIC_TEMP)
client.subscribe(SUBTOPIC_HUMIDITY)
client.connect(MQTT_SERVER, 1883, 60)
client.on_message = on_message
def speak(audio):
    """Speak `audio` via text-to-speech on a background thread.

    Sets the global `isSpeaking` flag (guarded by `speak_lock`) while the
    speech engine runs, so the voice-assistant callback can ignore the
    program's own speech output.
    """
    def run_speech(audio_to_speak):
        global isSpeaking
        with speak_lock:
            isSpeaking = True
        # A fresh engine per utterance avoids reusing a busy pyttsx3 instance.
        # "sapi5" is the Windows speech backend.
        engine = pyttsx3.init("sapi5")
        voices = engine.getProperty("voices")
        engine.setProperty("voice", voices[1].id)
        engine.say(audio_to_speak)
        engine.runAndWait()
        time.sleep(0.5)  # Delay to ensure no overlap between speaking and listening
        with speak_lock:
            isSpeaking = False
    speech_thread = Thread(target=run_speech, args=(audio,))
    speech_thread.start()
def send_chat(state, user_input, llm):
    """Send the user's command plus the current house state to the LLM and
    parse the JSON reply into a State object.

    :param state: current State, embedded into the prompt as context
    :param user_input: transcribed user command
    :param llm: LlamaCpp instance
    :return: new State parsed from the model output (raises on malformed JSON)
    """
    template = f"""
    The current environment data: {state}
    """
    parser = PydanticOutputParser(pydantic_object=State)
    # Mistral-instruct prompt format: [INST] ... [/INST].
    # NOTE(review): "neded" is a typo inside the prompt string; left as-is
    # since changing the prompt changes runtime behaviour.
    prompt = PromptTemplate(
        template="<s>[INST]\n{format_instructions}\n{input}\nNo explanations are neded other than the JSON."
        + template
        + "[/INST]",
        input_variables=["input"],
        partial_variables={"format_instructions": parser.get_format_instructions()},
    )
    _input = prompt.format_prompt(input=user_input)
    print("INPUT: " + _input.to_string())
    output = llm(_input.to_string())
    return parser.parse(output)
def publish_state(state: State) -> State:
    """Speak the state's message and publish the light command over MQTT.

    :param state: State to apply; `light` 1 -> "on", 0 -> "off"
    :return: the same State, unchanged
    """
    print(state.msg)
    speak(state.msg)
    client.publish(SUBTOPIC_LED, "on" if state.light == 1 else "off")
    # client.publish(SUBTOPIC_DOOR, "on" if state.door == 1 else "off")
    return state
# Global state
state = State(light=1, msg="The light is currently on")
# Marker whisper.cpp emits for silent audio segments; such transcripts are ignored.
blank_audio = "[BLANK_AUDIO]"
# True while the TTS thread is speaking; guarded by speak_lock.
isSpeaking = False
speak_lock = Lock()
def main():
    """Wire up MQTT, load the local Llama model, and start the voice assistant loop."""
    global state
    load_dotenv()
    # Start the loop to keep listening for incoming messages
    client.loop_start()
    # Callbacks support token-wise streaming
    callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
    llm = LlamaCpp(
        seed=100,
        model_path=MODEL,
        temperature=0,
        max_tokens=100,
        top_p=1,
        f16_kv=True,  # MUST set to True, otherwise you will run into problem after a couple of calls
        callback_manager=callback_manager,
        verbose=True,  # Verbose is required to pass to the callback manager
    )
    speak("Starting up, please wait")
    # Cache initial state and instructions with one warm-up round trip.
    state = send_chat(state, "Please output the current state of the house", llm)
    # Callback invoked by the assistant with each transcribed utterance.
    def parse_audio(user_input):
        global state
        # Skip our own speech, very short fragments, and the silence marker.
        if isSpeaking or len(user_input.strip()) < 10 or user_input.strip() == blank_audio:
            return
        print("user_input: " + user_input)
        state = send_chat(state, user_input, llm)
        try:
            # Publishing talks to MQTT/TTS; failures are logged, not fatal.
            publish_state(state)
        except Exception as e:
            print(e)
    # def test(user_input):
    #     if isSpeaking or len(user_input.strip()) < 10 or user_input.strip() == blank_audio:
    #         return
    #     print(user_input)
    #     print("Speak is being called")
    #     speak(user_input)
    my_assistant = Assistant(commands_callback=parse_audio , n_threads=8)
    speak("Ready to take in commands")
    my_assistant.start()
    # while True:
    #     print(state)
    #     user_input = input("> ")
    #     if len(user_input) == 0:
    #         continue
    #     state = send_chat(state, user_input, llm)
    #     try:
    #         publish_state(state)
    #     except Exception as e:
    #         print(e)
if __name__ == "__main__":
    main()
# Prompts to demo:
# Please help to turn on the light
# Please help to turn off the light
| MarcusTXK/esp32-llm-bridge | mvp_v3/test.py | test.py | py | 5,150 | python | en | code | 0 | github-code | 13 |
# vim: expandtab:tabstop=4:shiftwidth=4
"""
This is the base class for our gcp utility classes.
"""
import os
import sys

# pylint: disable=import-error
from apiclient.discovery import build
from oauth2client.client import GoogleCredentials
# Printed by print_dry_run_msg() whenever an action is skipped in dry-run mode.
DRY_RUN_MSG = "*** DRY RUN, NO ACTION TAKEN ***"
# pylint: disable=too-many-instance-attributes
class Base(object):
    """ Class that provides base methods for other gcp utility classes.

    Fixes over the original revision:
    - dict.has_key() (removed in Python 3) replaced with the `in` operator
    - `== None` comparisons replaced with identity tests
    - verbose_print no longer relies on Python 2 print-statement parsing
    """

    # API result caches shared by all of the utilities (class-level attributes).
    _volumes = None
    _instances = None
    _snapshots = None

    def __init__(self, project, region_name, creds_path=None, verbose=False):
        """ Initialize the class.

        :param project: GCP project name
        :param region_name: region to operate in
        :param creds_path: optional credentials file; application default
                           credentials are used when omitted
        :param verbose: when True, verbose_print() actually prints
        """
        if not creds_path:
            credentials = GoogleCredentials.get_application_default()
        else:
            credentials = GoogleCredentials.from_stream(creds_path)
        self._credentials = credentials
        self.scope = build('compute', 'beta', credentials=self._credentials)
        self.project = project
        self.region_name = region_name
        self._region = None
        self.verbose = verbose
        # Reset the shared caches so each new instance starts fresh.
        self.volumes = None
        self.snapshots = None

    @property
    def volumes(self):
        '''property for all volumes in a region (lazily fetched, then cached)'''
        # Identity test so that a legitimately empty result list is cached too.
        if Base._volumes is None:
            Base._volumes = self.get_all_volumes()
        return Base._volumes

    # pylint: disable=no-self-use
    @volumes.setter
    def volumes(self, vols):
        '''setter for volumes'''
        Base._volumes = vols

    @property
    def snapshots(self):
        '''property for all snapshots (lazily fetched, then cached)'''
        if Base._snapshots is None:
            Base._snapshots = self.get_all_snapshots()
        return Base._snapshots

    @snapshots.setter
    def snapshots(self, snaps):
        '''setter for snapshots'''
        Base._snapshots = snaps

    @property
    def instances(self):
        '''property for all instances in a region (lazily fetched, then cached)'''
        if Base._instances is None:
            Base._instances = self.get_all_instances()
        return Base._instances

    @instances.setter
    def instances(self, instances):
        '''setter for instances in a region'''
        Base._instances = instances

    @property
    def region(self):
        '''property for region; fetched from the compute API on first access'''
        if self._region is None:
            self._region = self.scope.regions().get(project=self.project, region=self.region_name).execute()
        return self._region

    def verbose_print(self, msg="", prefix="", end="\n"):
        """ Prints msg using prefix and end IF verbose is set on the class. """
        if self.verbose:
            # The original `print("%s%s%s") % (...)` only worked as a Python 2
            # print statement and raises TypeError on Python 3.  Write the
            # formatted string directly; `end` is the sole line terminator.
            sys.stdout.write("%s%s%s" % (prefix, msg, end))

    def print_dry_run_msg(self, prefix="", end="\n"):
        """ Prints a dry run message. """
        self.verbose_print(DRY_RUN_MSG, prefix=prefix, end=end)

    def get_all_snapshots(self):
        '''return all snapshots in the project'''
        return self.scope.snapshots().list(project=self.project).execute()['items']

    def get_all_instances(self):
        '''return all instances across the region's zones'''
        instances = []
        for zone in self.region['zones']:
            results = self.scope.instances().list(project=self.project,
                                                  zone=os.path.basename(zone),
                                                 ).execute()
            # has_key() was removed in Python 3; membership test works in both.
            if 'items' in results:
                instances.extend(results['items'])
        return instances

    def get_all_volumes(self):
        '''return all volumes across the region's zones'''
        vols = []
        for zone in self.region['zones']:
            results = self.scope.disks().list(project=self.project,
                                              zone=os.path.basename(zone),
                                             ).execute()
            if 'items' in results:
                vols.extend(results['items'])
        return vols

    @staticmethod
    def get_supported_regions(project, creds_path=None):
        """ Returns the non-deprecated regions for the given project. """
        if creds_path is None:
            credentials = GoogleCredentials.get_application_default()
        else:
            credentials = GoogleCredentials.from_stream(creds_path)
        scope = build('compute', 'beta', credentials=credentials)
        regions = scope.regions().list(project=project).execute()['items']
        supported_regions = [reg for reg in regions if 'deprecated' not in reg]
        return supported_regions

    def print_volume(self, volume, prefix=""):
        """ Prints out the details of the given volume. """
        self.verbose_print("%s:" % volume['name'], prefix=prefix)
        self.verbose_print("    Tags:", prefix=prefix)
        # Not every volume carries labels; default to an empty dict.
        for key, val in volume.get('labels', {}).items():
            self.verbose_print("      %s: %s" % (key, val), prefix=prefix)

    def print_snapshots(self, snapshots, msg=None, prefix=""):
        """ Prints out the details for the given snapshots. """
        if msg:
            self.verbose_print(msg, prefix=prefix)
        for snap in snapshots:
            self.verbose_print("  %s: start_time %s" % (snap['name'], snap['creationTimestamp']), prefix=prefix)

    def get_volume_by_name(self, vol_name):
        '''return a volume by its name, or None when absent'''
        for vol in self.volumes:
            if vol['name'] == vol_name:
                return vol
        return None

    def get_snapshot_by_name(self, snap_name):
        '''return a snapshot by its name, or None when absent'''
        for snap in self.get_all_snapshots():
            if snap['name'] == snap_name:
                return snap
        return None

    def update_snapshots(self, upd_snap):
        '''replace (or append) a snapshot in the shared snapshot cache'''
        for idx, snap in enumerate(self.snapshots):
            if snap['name'] == upd_snap['name']:
                self.snapshots[idx] = upd_snap
                break
        else:
            # for/else: no matching name found, so this is a new snapshot.
            self.snapshots.append(upd_snap)
        return True

    def update_volume(self, upd_vol):
        '''replace (or append) a volume in the shared volume cache'''
        for idx, vol in enumerate(self.volumes):
            if vol['name'] == upd_vol['name']:
                self.volumes[idx] = upd_vol
                break
        else:
            self.volumes.append(upd_vol)
        return True

    def refresh_snapshot(self, snap_name):
        '''fetch a fresh copy of a snapshot from the API'''
        return self.scope.snapshots().get(project=self.project, snapshot=snap_name).execute()

    def refresh_volume(self, vol_name, zone):
        '''fetch a fresh copy of a volume from the API'''
        return self.scope.disks().get(project=self.project, zone=zone, disk=vol_name).execute()

    def set_volume_label(self, volume_name, labels):
        '''call setLabels on a volume

        :param labels: dict of labels to merge in; a falsy value clears all labels
        '''
        volume = self.get_volume_by_name(volume_name)
        body = {}
        body['labels'] = {}
        if 'labels' in volume:
            body['labels'] = volume['labels'].copy()
        if not labels:
            # we wanted empty labels
            body['labels'] = {}
        elif isinstance(labels, dict):
            body['labels'].update(labels)
        body['labelFingerprint'] = volume['labelFingerprint']
        result = self.scope.disks().setLabels(project=self.project,
                                              zone=os.path.basename(volume['zone']),
                                              resource=volume['name'],
                                              body=body,
                                             ).execute()
        # Upon updating the labels the labelFingerprint changes. This needs to be refreshed.
        fresh_vol = self.refresh_volume(volume['name'], os.path.basename(volume['zone']))
        self.update_volume(fresh_vol)
        return result

    def set_snapshot_label(self, snap_name, labels):
        '''call setLabels on a snapshot

        :param labels: dict of labels to merge in; a falsy value clears all labels
        '''
        snapshot = self.get_snapshot_by_name(snap_name)
        body = {}
        body['labels'] = {}
        if 'labels' in snapshot:
            body['labels'] = snapshot['labels'].copy()
        if not labels:
            # we wanted empty labels
            body['labels'] = {}
        elif isinstance(labels, dict):
            body['labels'].update(labels)
        body['labelFingerprint'] = snapshot['labelFingerprint']
        result = self.scope.snapshots().setLabels(project=self.project,
                                                  resource=snapshot['name'],
                                                  body=body,
                                                 ).execute()
        # Upon updating the labels the labelFingerprint changes. This needs to be refreshed.
        fresh_snap = self.refresh_snapshot(snapshot['name'])
        self.update_snapshots(fresh_snap)
        return result
| openshift/openshift-tools | openshift_tools/cloud/gcp/base.py | base.py | py | 8,744 | python | en | code | 161 | github-code | 13 |
#Title: MPB_post_processing.py
#Author: Tony Chang
#Date: 02.10.2015
#Abstract: This script takes the output from MPB_Cold_T_area_analysis_v1_3.py that is stored as a NetCDF4
# and compiles the data together to single variables, so that they can be accessed in a single manner
#
import numpy as np
import matplotlib.pyplot as plt
import netCDF4 as nc
import shapefile
import gdal
import geotool as gt
import time
def shapeMask(sname, rname):
    """Rasterize a single-layer shapefile against a reference raster.

    Returns (mask_bool, points, extent) where mask_bool is True wherever the
    rasterized feature value is 0.
    """
    reference_raster = gdal.Open(rname)
    reader = shapefile.Reader(sname)
    feature = reader.shapes()[0]
    points = np.array(feature.points).T
    extent = gt.getFeatureExtent(points)
    rasterized = gt.rasterizer(points, reference_raster)
    # True where the rasterized grid is 0 (cells the feature did not cover).
    outside = np.where(rasterized == 0, True, False)
    return (outside, points, extent)
def outputAnnualSummary(v, startyear=1948, endyear=2011, mask=None):
    """Summarize one model-output variable year by year.

    For each year in [startyear, endyear), GYE_mpb_out_<year>.nc is read and
    the per-cell mean/median/sd/min/max across the year's time axis is taken.

    :param v: variable name inside the NetCDF files (e.g. 'survival')
    :param mask: optional boolean array (True = masked out); when given, every
                 summary grid is wrapped in a numpy masked array
    :return: dict with keys 'mu', 'med', 'sd', 'min', 'max'
    """
    ws = 'K:\\NASA_data\\mpb_model_out\\Annual\\'
    f_head = 'GYE_mpb_out_'
    sdata_mu = []  # spatial data, one summary grid per year
    sdata_med = []
    sdata_sd = []
    sdata_min = []
    sdata_max = []
    # BUG FIX: the original tested `mask != None`, which on a numpy array is an
    # elementwise comparison whose truth value raises ValueError; test identity.
    apply_mask = mask is not None
    for y in range(startyear, endyear):
        filename = '%s%s%s.nc' % (ws, f_head, y)
        nc_ds = nc.Dataset(filename)
        # we want to get the annual average of the year and the standard deviations
        var_data = nc_ds.variables[v][:]
        smu = np.mean(var_data, axis=0)
        smed = np.median(var_data, axis=0)
        ssd = np.std(var_data, axis=0)
        smin = np.min(var_data, axis=0)
        smax = np.max(var_data, axis=0)
        if apply_mask:
            smu = np.ma.array(smu, mask=mask)
            smed = np.ma.array(smed, mask=mask)
            ssd = np.ma.array(ssd, mask=mask)
            smin = np.ma.array(smin, mask=mask)
            smax = np.ma.array(smax, mask=mask)
        sdata_mu.append(smu)
        sdata_med.append(smed)
        sdata_sd.append(ssd)
        sdata_min.append(smin)
        sdata_max.append(smax)
    if apply_mask:
        out = {'mu': np.ma.array(sdata_mu), 'med': np.ma.array(sdata_med),
               'sd': np.ma.array(sdata_sd), 'min': np.ma.array(sdata_min),
               'max': np.ma.array(sdata_max)}
    else:
        out = {'mu': np.array(sdata_mu), 'med': np.array(sdata_med),
               'sd': np.array(sdata_sd), 'min': np.array(sdata_min),
               'max': np.array(sdata_max)}
    return out
def maskedMean(data):
    """Return a 1-D array holding the masked-aware mean of each array in *data*."""
    return np.array([np.ma.mean(arr) for arr in data])
def maskedPercentile(data, lower=5, upper=95):
    """Per-array percentiles of the valid (unmasked) values.

    Returns two 1-D arrays (low, high) with one entry per masked array in
    *data*; defaults give the 5th/95th percentiles.

    BUG FIX: the original indexed ``arr.data[arr.mask]``, which selects the
    *masked-out* (invalid) cells.  Percentiles are now taken over
    ``arr.compressed()`` — the valid cells — consistent with maskedMean.
    """
    low = np.zeros(len(data))
    high = np.zeros(len(data))
    for i, arr in enumerate(data):
        low[i], high[i] = np.percentile(arr.compressed(), (lower, upper))
    return (low, high)
#var_values = ['tau', 'R', 'ptmin', 'lt50', 'survival', 'C', 'P1', 'P2' , 'P3' ]
# Analysis window: model output files exist for these years.
startyear = 1948
endyear = 2011
'''
#if interested in the GYE average
sname = 'D:\\CHANG\\GIS_Data\\GYE_Shapes\\GYE.shp' #reference file for the GYE shape
rname = 'E:\\TOPOWX\\GYE\\tmin\\TOPOWX_GYE_tmin_1_1948.tif' #reference file for climate data
gyemask, pt, fex = shapeMask(sname, rname)
v_mu_gye, v_sd_gye = outputAnnualSummary(v_i, startyear, endyear, gyemask)
'''
#could use the WBP 2010 mask (remembering to take the inverse because 1 is False and 0 is True)
wbpmask = ~np.array(gdal.Open('E:\\WBP_model\\output\\prob\\WBP2010_binary.tif').ReadAsArray(), dtype = bool)
# Annual summaries of survival / minimum temperature / LT50 within the WBP mask.
v_surv_wbp = outputAnnualSummary('survival', startyear, endyear, wbpmask)
v_tmin_wbp = outputAnnualSummary('ptmin', startyear, endyear, wbpmask)
v_lt50_wbp = outputAnnualSummary('lt50', startyear, endyear, wbpmask)
#summarize the masked rasters
#ts_mu_gye = maskedMean(v_mu_gye)
#ts_sd_gye = maskedMean(v_sd_gye)
ts_mu_wbp = maskedMean(v_surv_wbp['mu'])
ts_5_wbp, ts_95_wbp = maskedPercentile(v_surv_wbp['mu'])
ts_mu_tmin = maskedMean(v_tmin_wbp['mu'])
ts_min_tmin = maskedMean(v_tmin_wbp['min'])
ts_min_lt50 = maskedMean(v_lt50_wbp['min'])
# Gap between observed minimum temperature and the lethal threshold.
ts_diff = ts_min_tmin-ts_min_lt50
#set the outputs wanted
t = np.arange(startyear, endyear)
#sample from a truncated normal distribution at each timestep
'''
"""
from scipy.stats import truncnorm
from scipy.stats import norm
n = 200
rand_samp = np.zeros((n,len(t)))
u_lim = 1.0 #probabilities can run between 0 and 1
l_lim = 0.0
a = (l_lim - ts_m)/ts_s
b = (u_lim - ts_m)/ts_s
for i in range(n):
for j in range(len(ts_m)):
rand_samp[i][j] = truncnorm.rvs(a[j],b[j],loc=ts_m[j],scale=ts_s[j])
"""
high = np.where(ts_mu_wbp ==np.max(ts_mu_wbp))[0][0]
low = np.where(ts_mu_wbp ==np.min(ts_mu_wbp))[0][0]
med = np.where(ts_mu_wbp ==np.median(ts_mu_wbp))[0][0]
pnts = np.array([[t[low], t[med], t[high]],[ts_mu_wbp[low],ts_mu_wbp[med],ts_mu_wbp[high]]])
pntlabels = ['Low','Med','High']
txtpnts = np.array([[pnts[0][0],pnts[0][1],pnts[0][2]],[pnts[1][0]-0.06,pnts[1][1]-0.08,pnts[1][2]+0.02]])
plt.rcParams['figure.figsize'] = 8,4
ax = plt.subplot(111)
#ax.scatter(pnts[0], pnts[1], color ='blue')
#not considering the bootstrap sampling
#for i in range(n):
# ax.plot(t, rand_samp[i], color = 'orange', alpha = 0.03)
ax.plot(t,ts_mu_wbp, color = 'red', lw = 1.5, label = 'wbp')
ax.fill_between(t, ts_5_wbp, ts_95_wbp, color = 'orange', alpha=0.2)
#ax.scatter(pnts[0], pnts[1], color ='blue')
#this plots temperature on the same figure
#ax2 = ax.twinx()
#ax2.plot(t,ts_mu_tmin, color = 'blue', lw = 1, ls = '--', label = 'wbp')
#ax.tick_params(axis='y', colors='black')
#ax2.tick_params(axis='y', colors='blue')
#ax2.set_ylabel(r'$\tau_{min}$ $(^oC)$', fontsize = 14, color = 'blue')
#for i, txt in enumerate(pntlabels):
# ax.annotate('%s'%(txt), (pnts[0][i], pnts[1][i]),xytext=(txtpnts[0][i],txtpnts[1][i]))
ax.set_ylabel('$P(%s)$'%('survival'), fontsize = 14, color = 'black')
ax.set_xlim(startyear, endyear)
ax.set_xlabel('$Time$', fontsize = 14)
plt.title('GYE MPB population survival 1948-2011')
plt.grid()
plt.savefig('E:\\mpb_model\\climate_application\\output\\survival_plot_%s.png'%(time.strftime("%m%d%Y")
), dpi = 600, bbox_inches = 'tight')
'''
#now we would like to highlight what the low point and the high points look like spatially
'''
plt.rcParams['figure.figsize'] = 10,8
ae = [-112.39166727055475, -108.19166728736126, 42.27499982, 46.19166648]
i=0
fig = plt.figure()
ax1 = plt.subplot(131)
a = ax1.imshow(v_surv_wbp['mu'][low], vmin = 0, vmax = 1, extent = ae)
ax1.set_title('%s (year = %i)' %(pntlabels[i],pnts[0][i]), fontsize = 16)
ax1.locator_params(nbins=4)
ax1.grid()
ax1.set_xlabel('Longitude (DD)')
ax1.set_ylabel('Latitude (DD)')
i =1
ax2 = plt.subplot(132)
b = ax2.imshow(v_surv_wbp['mu'][med], vmin = 0, vmax = 1, extent = ae)
ax2.set_title('%s (year = %i)' %(pntlabels[i],pnts[0][i]), fontsize = 16)
ax2.locator_params(nbins=4)
ax2.grid()
ax2.set_xlabel('Longitude (DD)')
ax2.set_ylabel('Latitude (DD)')
i=2
ax3 = plt.subplot(133)
c = ax3.imshow(v_surv_wbp['mu'][high], vmin = 0, vmax = 1, extent = ae)
ax3.set_title('%s (year = %i)' %(pntlabels[i],pnts[0][i]), fontsize = 16)
ax3.locator_params(nbins=4)
ax3.grid()
ax3.set_xlabel('Longitude (DD)')
ax3.set_ylabel('Latitude (DD)')
cbaxes = fig.add_axes([0.14, 0.2, 0.76, 0.04])
cb = fig.colorbar(b, orientation ='horizontal', cax = cbaxes)
cb.set_label('$P(survival)$', fontsize = 18)
cb.ax.tick_params(labelsize = 14)
fig.tight_layout()
plt.savefig('E:\\mpb_model\\climate_application\\output\\survival_spatial_%s.png'%(time.strftime("%m%d%Y")
), dpi = 600, bbox_inches = 'tight')
'''
#now we need to plot the tmin and lt50 against time
'''
plt.rcParams['figure.figsize'] = 8,4
fig = plt.figure()
ax = plt.subplot(111)
ax.plot(t, ts_min_tmin, color = 'blue', lw = 1.5, label = r'$\tau_{min}$')
ax.plot(t, ts_min_lt50, ls = ':', color = 'green', lw = 1.5, label = r'$LT_{50}$')
#ax.fill_between(t,ts_min_tmin,ts_min_lt50, color ='grey', alpha =0.2)
ax.grid()
ax.set_xlim(t[0], t[-1])
ax.set_xlabel('$Time$', fontsize = 18)
ax.set_ylabel('$Temperature$ $(^oC)$', fontsize =18)
ax.legend(loc='lower right')
plt.savefig('E:\\mpb_model\\climate_application\\output\\temp_compare_%s.png'%(time.strftime("%m%d%Y")
), dpi = 600, bbox_inches = 'tight')
'''
'''
plt.rcParams['figure.figsize'] = 12,4
import matplotlib.gridspec as gs
fig = plt.figure()
gax = gs.GridSpec(1,4)
ax1 = plt.subplot(gax[0,:-1])
n_diff = (ts_diff-np.min(ts_diff))/(np.max(ts_diff)-np.min(ts_diff))
n_surv = (ts_mu_wbp-np.min(ts_mu_wbp))/(np.max(ts_mu_wbp)-np.min(ts_mu_wbp))
ax1.plot(t, n_diff, color = 'purple', ls="-.", lw = 2, label = r'$\widehat{\tau_{min}-LT_{50}}$')
ax1.plot(t, n_surv, color='red', label = r'$\widehat{P(survival)}$')
ax1.set_xlabel('$Time$', fontsize = 18)
ax1.set_ylabel('$Normalized$ $value$', fontsize = 18)
ax1.set_xlim(t[0], t[-1])
ax1.legend(loc ='lower right', fontsize = 10)
ax1.grid()
ax2 = plt.subplot(gax[0,-1])
ax2.scatter(n_diff, n_surv, marker = 'o')
ax2.plot(np.array([0,1]),np.array([0,1]), ls = ':', lw = 2)
ax2.set_xlim(0, 1)
ax2.set_ylim(0, 1)
ax2.set_xlabel(r'$\widehat{\tau_{min}-LT_{50}}$', fontsize = 18)
ax2.set_ylabel(r'$\widehat{P(survival)}$', fontsize = 18)
ax2.grid()
fig.tight_layout()
plt.savefig('E:\\mpb_model\\climate_application\\output\\%s\\temp_thr_surv_%s.png'%(time.strftime("%m%d%Y"),time.strftime("%m%d%Y")
), dpi = 600, bbox_inches = 'tight')
'''
#plot with np.linalg.norm
'''
ax.grid()
ax.set_xlim(t[0], t[-1])
ax.legend()
ax2 = plt.subplot(212)
ax2.plot(t, ts_diff/np.linalg.norm(ts_diff), color = 'r')
ax2.plot(t, ts_mu_wbp/np.linalg.norm(ts_mu_wbp), color='blue')
plt.show()
''' | tonychangmsu/Python_Scripts | eco_models/mpb/MPB_post_processing_v1_2.py | MPB_post_processing_v1_2.py | py | 9,412 | python | en | code | 0 | github-code | 13 |
import math
import os
import hashlib
from urllib.request import urlretrieve
import zipfile
import gzip
import shutil
import numpy as np
from PIL import Image
from tqdm import tqdm
def _read32(bytestream):
"""
Read 32-bit integer from bytesteam
:param bytestream: A bytestream
:return: 32-bit integer
"""
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
def _unzip(save_path, _, database_name, data_path):
"""
Unzip wrapper with the same interface as _ungzip
:param save_path: The path of the gzip files
:param database_name: Name of database
:param data_path: Path to extract to
:param _: HACK - Used to have to same interface as _ungzip
"""
print('Extracting {}...'.format(database_name))
with zipfile.ZipFile(save_path) as zf:
zf.extractall(data_path)
def _ungzip(save_path, extract_path, database_name, _):
    """
    Unzip a gzip file and extract it to extract_path
    :param save_path: The path of the gzip files
    :param extract_path: The location to extract the data to
    :param database_name: Name of database
    :param _: HACK - Used to have the same interface as _unzip
    """
    # Get data from save_path
    with open(save_path, 'rb') as f:
        with gzip.GzipFile(fileobj=f) as bytestream:
            # Header: magic, image count, rows, cols (big-endian uint32 each).
            # 2051 is the expected magic for IDX3-style image data — TODO confirm
            # against the dataset's format spec.
            magic = _read32(bytestream)
            if magic != 2051:
                raise ValueError('Invalid magic number {} in file: {}'.format(magic, f.name))
            num_images = _read32(bytestream)
            rows = _read32(bytestream)
            cols = _read32(bytestream)
            # Remaining payload is num_images grayscale images, one byte per pixel.
            buf = bytestream.read(rows * cols * num_images)
            data = np.frombuffer(buf, dtype=np.uint8)
            data = data.reshape(num_images, rows, cols)
    # Save data to extract_path, one JPEG per image ('L' = 8-bit grayscale).
    for image_i, image in enumerate(
            tqdm(data, unit='File', unit_scale=True, miniters=1, desc='Extracting {}'.format(database_name))):
        Image.fromarray(image, 'L').save(os.path.join(extract_path, 'image_{}.jpg'.format(image_i)))
def download_extract(data_name, data_path):
    """
    Download and extract a dataset if it is not already present.

    :param data_name: dataset identifier: 'dog-data' or 'human-data'
    :param data_path: directory the archive is downloaded into and extracted under
    :raises ValueError: if data_name is not a known dataset
    """
    DATASET_DOG_NAME = 'dog-data'
    DATASET_HUMAN_NAME = 'human-data'

    if data_name == DATASET_DOG_NAME:
        url = 'https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip'
        extract_path = os.path.join(data_path, 'dogImages')
        save_path = os.path.join(data_path, 'dog.zip')
        extract_fn = _unzip
    elif data_name == DATASET_HUMAN_NAME:
        url = 'https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip'
        extract_path = os.path.join(data_path, 'lfw')
        save_path = os.path.join(data_path, 'human.zip')
        extract_fn = _unzip
    else:
        # BUG FIX: unknown names previously fell through and crashed later with
        # a NameError on `extract_path`; fail fast with a clear error instead.
        raise ValueError('Unknown dataset name: {}'.format(data_name))

    if os.path.exists(extract_path):
        print('Found {} Data'.format(data_name))
        return

    if not os.path.exists(data_path):
        os.makedirs(data_path)

    # Download only if the archive is not already on disk.
    if not os.path.exists(save_path):
        with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Downloading {}'.format(data_name)) as pbar:
            urlretrieve(
                url,
                save_path,
                pbar.hook)

    os.makedirs(extract_path)
    try:
        extract_fn(save_path, extract_path, data_name, data_path)
    except Exception as err:
        shutil.rmtree(extract_path)  # Remove extraction folder if there is an error
        raise err

    # Remove compressed data
    os.remove(save_path)
def dnld_bottleneck(data_name):
    """
    Download pre-computed bottleneck features for the given network.

    :param data_name: one of 'vgg16', 'vgg19', 'inception', 'resnet', 'xception'
    :raises ValueError: if data_name is not a known network
    """
    data_path = './bottleneck_features'
    base_url = 'https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/'
    # Map each supported network name to the npz file published for it.
    filenames = {
        'vgg16': 'DogVGG16Data.npz',
        'vgg19': 'DogVGG19Data.npz',
        'inception': 'DogInceptionV3Data.npz',
        'resnet': 'DogResnet50Data.npz',
        'xception': 'DogXceptionData.npz',
    }
    if data_name not in filenames:
        # BUG FIX: unknown names previously crashed later with a NameError on
        # the unbound `save_path`; fail fast with a clear error instead.
        raise ValueError('Unknown bottleneck dataset: {}'.format(data_name))
    url = base_url + filenames[data_name]
    save_path = os.path.join(data_path, filenames[data_name])

    if not os.path.exists(data_path):
        os.makedirs(data_path)

    # Download only if the file is not already on disk.
    if not os.path.exists(save_path):
        with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Downloading {}'.format(data_name)) as pbar:
            urlretrieve(
                url,
                save_path,
                pbar.hook)
class DLProgress(tqdm):
    """
    Handle Progress Bar while Downloading
    """
    # Block count reported at the previous callback; lets hook() feed tqdm the
    # delta since the last call rather than a running total.
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        """
        A hook function that will be called once on establishment of the network connection and
        once after each block read thereafter.
        :param block_num: A count of blocks transferred so far
        :param block_size: Block size in bytes
        :param total_size: The total size of the file. This may be -1 on older FTP servers which do not return
                            a file size in response to a retrieval request.
        """
        self.total = total_size
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num
| justputitdown/DLND | 2_cnn_dog_project/data_dl.py | data_dl.py | py | 6,016 | python | en | code | 0 | github-code | 13 |
def solution(sizes):
    """Minimum wallet area that fits every card, given cards may be rotated 90°.

    Normalize each card so its longer side comes first; the wallet then needs
    max(longer sides) x max(shorter sides).
    """
    max_long, max_short = 0, 0
    for w, h in sizes:
        long_side, short_side = (w, h) if w >= h else (h, w)
        max_long = max(max_long, long_side)
        max_short = max(max_short, short_side)
    return max_long * max_short
from tilt_detector import TiltDetector, LineMerger
from utils import ResultsHandler
from concrete_polygon_extractor import LineExtender, PolygonRetriever
import os
import argparse
import sys
import cv2
def parse_args():
    """Parse command-line options for the tilt-detection pipeline."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--image', type=str, help='Path to an image.')
    parser.add_argument('--folder', type=str, help="Path to a folder with images to process")
    parser.add_argument('--save_path', type=str, default=None, help="If None, don't save results, show them")
    parser.add_argument('--retrieve', type=int, default=0, help="Retrieve image section defined by the two pole"
                                                                "edge lines detected")
    return parser.parse_args()
def main():
    """Run tilt detection over one image or a folder, report per-image error,
    and optionally retrieve the pole region for downstream crack detection."""
    arguments = parse_args()
    assert arguments.image or arguments.folder, "No input data provided"
    images_to_process = list()
    if arguments.image:
        if os.path.isfile(arguments.image):
            images_to_process.append(arguments.image)
        else:
            print("ERROR: Provided image is not an image")
            sys.exit()
    else:
        if not os.path.isdir(arguments.folder):
            print("ERROR: Provided folder is not a folder")
            sys.exit()
        # Keep only files with a recognized image extension.
        for image_name in os.listdir(arguments.folder):
            if not any(image_name.endswith(ext) for ext in [".jpg", ".png", ".jpeg", ".JPG", ".JPEG", ".PNG"]):
                continue
            images_to_process.append(os.path.join(arguments.folder, image_name))
    # If save path has been provided, all images will be processed and saved there
    # Otherwise, after each image gets processed it will be shown to a user until
    # he clicks a button to proceed to the next image if any.
    # In order to just calculate and receive angle = flag is 0
    if arguments.save_path is not None:
        if not os.path.exists(arguments.save_path):
            os.mkdir(arguments.save_path)
        results_handling = 1, arguments.save_path
        handler = ResultsHandler(save_path=arguments.save_path)
    else:
        results_handling = 0, ''
        handler = None
    merger = LineMerger()
    detector = TiltDetector(results_handling_way=results_handling,
                            line_merger=merger,
                            results_processor=handler)
    total_error = 0
    images_with_calculated_angles = 0
    images_without_angle_calculated = []
    for path_to_image in images_to_process:
        image_name = os.path.split(path_to_image)[-1]
        print(image_name)
        # Find lines, calculate the angle
        predicted_tilt_angle, the_lines = detector.process_image(path_to_image)
        # Keep track of the error
        if predicted_tilt_angle is not None:
            # CHANGE ME BACK, FOR NEW IMGS TESTING WITHOUT ANGLE
            # truth_angle = float(image_name[3:7])
            # NOTE(review): hard-coded ground truth of 3 degrees is a temporary
            # testing shortcut (see comment above) — restore parsing from the
            # filename before trusting the reported error.
            truth_angle = 3
            difference = abs(truth_angle - predicted_tilt_angle)
            error = round(difference / truth_angle, 3)
            print("Error:", error)
            total_error += error
            images_with_calculated_angles += 1
        else:
            images_without_angle_calculated.append(image_name)
        if not the_lines:
            print("Failed to detect any lines for:", image_name)
            continue
        assert 1 <= len(the_lines) <= 2, "Wrong number of lines!"
        # Retrieve area defined by the lines for future cracks detection
        if arguments.retrieve and the_lines:
            line_extender = LineExtender()
            polygon_retriever = PolygonRetriever(line_extender=line_extender)
            clean_image=polygon_retriever.resize_for_nn(path_to_image, the_lines, width = 224, height = 1120)
            #concrete_polygon = polygon_retriever.retrieve_polygon(path_to_image,the_lines)
            #print("THIS IS CONCRETE POLYGON",concrete_polygon)
            # NOTE(review): save_path may be None here when --retrieve is given
            # without --save_path; os.path.join would then raise — confirm.
            cv2.imwrite(os.path.join(arguments.save_path, image_name), clean_image)
            # DELETE ME I AM FOR TESTING
            #handler.save_image(image_name, concrete_polygon)
            #handler.save_image_1(image_name, clean_image)
    if images_with_calculated_angles > 0:
        mean_error = round(total_error / images_with_calculated_angles, 3)
        print("\nMEAN ERROR:", mean_error * 100, "%")
    else:
        # NOTE(review): implicit string concatenation below is missing a space
        # ("...for" + "any images"); left unchanged here as it is runtime text.
        print("\nCannot calculate MEAN ERROR. Failed to calculate the angle for"
              "any images")
    if images_without_angle_calculated:
        print("\nFAILED TO CALCULATE ANGLE FOR:",
              ' '.join(map(str, images_without_angle_calculated)))
if __name__ == "__main__":
    main()
| dashos18/resizing-for-pole-extracted | main.py | main.py | py | 4,708 | python | en | code | 0 | github-code | 13 |
74909558096 |
# Build an n-point weight vector: 1 at both ends, alternating 2/4 inside
# (even index -> 2, odd index -> 4).  Looks like composite-Simpson
# integration weights — TODO confirm against the course material.
num_de_n = int(input())
pesos = [2 if indice % 2 == 0 else 4 for indice in range(num_de_n)]
pesos[0] = 1
pesos[num_de_n - 1] = 1
print(pesos)
41974526102 | import requests
import random
import time
user_agents = [
"Mozilla/5.0 (iPhone; CPU iPhone OS 17_1_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.1 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (PlayStation 5 8.20) AppleWebKit/601.2 (KHTML, like Gecko)",
"Mozilla/5.0 (PlayStation 4 11.00) AppleWebKit/601.2 (KHTML, like Gecko)",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/119.0",
"Mozilla/5.0 (Linux; Android 14; SM-A205U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.6045.134 Mobile Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.2151.58"
]
def random_line(filename):
with open(filename, 'r', encoding='utf-8') as file:
lines = file.readlines()
return random.choice(lines).strip()
def generate_random_email(first_name, last_name):
domains = ["pornhub.com", "gmail.com", "yahoo.com", "youporn.com", "gaypornhd.com", "gmx.de", "gmx.net"]
domain = random.choice(domains)
random_number = random.randint(1000, 9999)
email = f"{first_name.lower()}.{last_name.lower()}{random_number}@{domain}"
return email
def RegisterFakeRefUsers():
url = "https://www.predecessorgame.com/api/ps-beta-signup"
proxies = [
'http://127.0.0.1' # proxies
]
num_iterations = 5
hours_to_sleep = 1 # Set the number of hours to sleep
total_iterations = 0
while True: # Run indefinitely
for _ in range(num_iterations):
for attempt in range(5): # Retry 5 times if request fails
random_first_name = random_line("FirstName.txt")
random_last_name = random_line("LastName.txt")
random_email = generate_random_email(random_first_name, random_last_name)
proxy = proxies[attempt % len(proxies)]
user_agent = random.choice(user_agents)
headers = {
"Host": "www.predecessorgame.com",
"Cookie": "_ga=GA1.1.1934670820.1699898414; _fbp=fb.1.1699920678399.575969818; _csrf=hvDY_0oLHn0g8_5otiKfoy7s; _gcl_au=1.1.2108489854.1699898440.144178858.1699922231.1699922230; _ga_TCC8E8VM94=GS1.1.1699920339.2.1.1699922554.0.0.0",
"Content-Length": "532",
"Sec-Ch-Ua": '"Chromium";v="119", "Not?A_Brand";v="24"',
"Content-Type": "application/json",
"Csrf-Token": "47XyhwVN-UbgbZM-aFG6ZzBwJFWIhGHDaG-Q",
"Sec-Ch-Ua-Mobile": "?0",
"User-Agent": user_agent, # Use the selected user agent
"Sec-Ch-Ua-Platform": '"Windows"',
"Accept": "*/*",
"Origin": "https://www.predecessorgame.com",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Dest": "empty",
"Referer": "https://www.predecessorgame.com/play/playstation-beta-access?ref_id=",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7",
"Priority": "u=1, i"
}
data = {
"email": random_email,
"firstName": f"New{random_first_name}",
"lastName": f"New{random_last_name}",
"answers": [
{"question_value": "Which platform would you like to participate on?", "answer_value": " PlayStation 5"},
{"question_value": "Which region will you participate from?", "answer_value": " Europe"},
{"question_value": "Would you like to receive news, special offers, feedback surveys and playtest invitations from Omeda Studios?", "answer_value": "Yes"}
],
"referral": "https://www.predecessorgame.com/play/playstation-beta-access?ref_id="
}
try:
response = requests.post(url, json=data, headers=headers, proxies={'http': proxy}, timeout=10)
response.raise_for_status()
print("Generated Email:", random_email)
print(f"Used proxy: {proxy}")
print("Referral:", data["referral"])
print("User Agent:", user_agent)
print("Response status code:", response.status_code)
print("Response text:", response.text, "\n")
break # Break the inner loop if the request is successful
except requests.exceptions.RequestException as e:
print(f"Error making request: {e}. Retrying...\n")
time.sleep(1) # Sleep for 1 second before retrying
#time.sleep(hours_to_sleep * 3600) # Sleep for the specified number of hours
total_iterations += 1
if total_iterations % 5 == 0:
print(f"Total iterations: {total_iterations}. Sleeping for an hour...")
time.sleep(hours_to_sleep * 3600) # Sleep for an hour before resetting num_iterations to 0
# Call the function
RegisterFakeRefUsers()
| HoppersPS4/PredecessorGameQueSkipper | main.py | main.py | py | 5,303 | python | en | code | 0 | github-code | 13 |
import os
import sys
import lxml
from lxml import etree
def domainfromurl(url):
x = url.replace('https://', '')
y = x.split('/')
return y[0]
def nobindfromurl(url):
x = url.split(';')
return x[1].replace('nobind=', '')
def ipfromurl(url):
x = url.replace('https://', '')
return True
def url_split(url):
result = {"url": "", 'domain': '', "ip": "", "nobind": 0}
result["url"] = url
result["domain"] = domainfromurl(url)
result['nobind'] = nobindfromurl(url)
return result
def config_files():
config_folder = '/usr/local/roxen/configurations/'
result = []
for item in os.listdir(config_folder):
if not item.endswith(
'~') and not item == 'Administration_Interface' and not item == '_roxen_pid' and not item == 'Global_Variables':
file_path = os.path.join(config_folder, item)
if os.path.isfile(file_path):
result.append(item)
return result
def url_list(sitename):
urls = []
filename = '/usr/local/roxen/configurations/' + sitename
with open(filename, 'r') as f:
try:
s = f.read()
s = "<root>" + s + "</root>"
parser = etree.XMLParser(ns_clean=True, recover=True)
tree = etree.fromstring(unicode(s, "utf-8", errors='ignore'), parser)
for region in tree.iterfind("roxen-config/region"):
for key, value in region.items():
if value.startswith('spider'):
for child in region:
for key, value in child.items():
if value == 'URLs':
for url in child[0]:
url_result = {'url': '', 'domain': '', 'ip': '', 'nobind': ''}
split_url = url_split(url.text)
url_result['url'] = split_url['url']
url_result['domain'] = split_url['domain']
url_result['ip'] = 0
url_result['nobind'] = split_url['nobind']
if url_result['url'] != '':
urls.append(url_result)
except Exception as e:
print("Could not parse:" + filename + ' ' + str(sys.exc_info()[1]))
return urls
def mountpoint_list(sitename):
mountpoints = []
filename = '/usr/local/roxen/configurations/' + sitename
with open(filename, 'r') as f:
try:
s = f.read()
s = "<root>" + s + "</root>"
parser = etree.XMLParser(ns_clean=True, recover=True)
tree = etree.fromstring(unicode(s, "utf-8", errors='ignore'), parser)
for region in tree.iterfind("roxen-config/region"):
for key, value in region.items():
if value.rpartition('#')[0] == 'filesystem':
mountpoint_result = {'mountpoint': '', 'searchpath': ''}
for child in region:
for key, value in child.items():
mountpoint = ''
searchpath = ''
if value == 'mountpoint':
mountpoint = child[0].text
mountpoint_result['mountpoint'] = mountpoint
if value == 'searchpath':
searchpath = child[0].text
mountpoint_result['searchpath'] = searchpath
mountpoints.append(mountpoint_result)
except Exception as e:
print("Could not parse:" + filename + ' ' + str(sys.exc_info()[1]))
return mountpoints
| whojarr/roxentools | roxentools/config.py | config.py | py | 3,911 | python | en | code | 1 | github-code | 13 |
21268503858 | import json,sys,re
json_path="/home/fux/fux/miRNASNP3/predict_result/altutr/Targetscan/tga_altutr_compltet_chr_json/"
bed_path="/home/fux/fux/miRNASNP3/predict_result/altutr/Targetscan/tga_altutr_compltet_chr_bed/"
in_path="/home/fux/fux/miRNASNP3/predict_result/altutr/Targetscan/tga_altutr_compltet_chr/"
f=sys.argv[1]
fi=f.split('.')[0]
with open(json_path+fi+".json","a") as jsonout:
temp_json={}
with open(bed_path+f,"a") as bedout:
with open(in_path+f) as infile:
for line in infile:
nline=re.split(r':|#|\t',line.strip())
strand=re.split(r'\(|\)',nline[1])[1]
if strand=='-':
site_start=int(nline[10])-int(nline[14])
site_end=int(nline[10])+int(nline[13])
else:
site_start=int(nline[9])+int(nline[13])
site_end=int(nline[9])+int(nline[14])
newline=nline[0]+'\t'+str(site_start)+'\t'+str(site_end)+'\t'+nline[11]+'#'+nline[6]+'#'+nline[3]
newkey=nline[0]+'#'+str(site_start)+'#'+str(site_end)+'#'+nline[11]+'#'+nline[6]+'#'+nline[3]
if newkey in temp_json.keys():
temp_json[newkey].append(line.strip())
else:
temp_json[newkey]=[line.strip()]
bedout.write(newline+'\n')
json.dump(temp_json,jsonout)
| chunjie-sam-liu/miRNASNP-v3 | scr/predict_result/altutr/B-01-altutr-tgs-bed.py | B-01-altutr-tgs-bed.py | py | 1,415 | python | en | code | 3 | github-code | 13 |
74136034898 |
def is_anagram(first_string, second_string):
first_str = list(first_string.lower())
second_str = list(second_string.lower())
if (len(first_str) != len(second_str)):
return False
for value in first_str:
try:
second_str.remove(value)
except ValueError:
return False
return True
| magno-vicentini/project-algorithms | challenges/challenge_anagrams.py | challenge_anagrams.py | py | 349 | python | en | code | 0 | github-code | 13 |
6922236318 | '''
Given an array of n integers nums and a target, find the number
of index triplets i, j, k with 0 <= i < j < k < n that
satisfy the condition nums[i] + nums[j] + nums[k] < target.
Example:
Input:
'''
class Solution:
def hash(self,nums):
hashmap={}
for num in nums:
if num not in hashmap:
hashmap[num]=1
else:
hashmap[num]+=1
return hashmap
def complement_map(self,nums,target):
complement_map = {}
for num in nums:
complement_map[num]= target-num
return complement_map
def threeSumSmaller(self,nums,target):
#In Brute force we would have a computational Complexity
#of n^3
#we reduce the computtional complexity to on²
#Search for the hashmap
#complement_map is a dict:
complement_map = self.complement_map(nums,target)
#Keep track of indices:
index = []
k=1
for i in range(len(nums)):
for j in range(1,len(nums)-k):
if nums[i] < target and nums[j] < target: #if the numbers are greater
#than the target- ignore them.
if nums[j]+nums[j+k]<complement_map[nums[i]]:
index.append(nums[i])
index.append(nums[j])
index.append(nums[j+k])
#k+=1
return index
#Another approach 2 pointers.
#Divide and ocnquer approach: Start from both side.
#2 Pointers approach
#sort array of length n: nlogn
#perform binary search from the middle. --> such that sum = target - nums[i]
#
class Solution_2pointers:
def threeSumSmaller(self,nums,target):
sum = 0
#sort the array.
sorted_nums = sorted(nums)
for i in range(len(sorted_nums)-2):
sum+=self.twoSumSumaller(sorted_nums,i+1,target-sorted_nums[i])
return sum
#We first work on twoSumSumaller.
#we use 2 pointers from right and left.
def twoSumSumaller(self,nums,startindex,target):
sum = 0
left = startindex
right = len(nums)-1 #since we want to index the array
#We have 2 pointers left and right.
#we do not allow tot croos over
while (left <right):
#we move left to more right.
#we move right to more left.
if (nums[left]+nums[right])<target:
sum +=right-left
left+=1
else:
right -=1
return sum
'''
Here the time complexity is: O(n)*log(n)*n --> O(n²log(n))
Space Complexity: O(1)
'''
if __name__ == "__main__":
nums = [3,5,2,8,1]
target = 9
object = Solution()
print ('The complement_map is:',object.complement_map(nums,target))
print ('The sum combinations are:',object.threeSumSmaller(nums,target))
object_2pointers = Solution_2pointers()
print ('The sum combinations are:',object_2pointers.threeSumSmaller(nums,target))
| Oushesh/CODING_INTERVIEW | LeetCode/Apple/OnSite/threeSumSmaller.py | threeSumSmaller.py | py | 2,997 | python | en | code | 0 | github-code | 13 |
27705286602 | import os
import sentencepiece
import collections
import re
class Patterns:
SELF_BREAK_TOKEN = r'<selfbr>'
SELF_BREAK_RGX = re.compile(SELF_BREAK_TOKEN)
GET_SUBMISSION_SELF_TEXT_RGX = re.compile(
r'(?<=%s).*$' % SELF_BREAK_TOKEN, re.DOTALL)
BOT_BODY_RGX = re.compile(
r"""^i am a bot|^i\'m a bot|^bleep.*?bloop|^beep.*?boop|i am a bot[^a-zA-Z]*$
|^i\'m a bot[^a-zA-Z]*$|bleep.*?bloop[^a-zA-Z]*$|beep.*?boop[^a-zA-Z]*$'""",
re.I)
BOT_BUTTON_RGX = re.compile(r'\^\|\s*\^\[')
BOT_AUTHOR_PATTERNS = [
r'^imgur',
r'^linkfixer',
r'bots?[^a-zA-Z]*$',
r'tips?$',
r'quotes$',
r'transcriber$',
r'watch$',
r'breaker$',
r'fixer$',
]
BOT_AUTHOR_RGX = re.compile('|'.join(BOT_AUTHOR_PATTERNS), re.I)
# Markdown tables: https://www.markdownguide.org/extended-syntax/#tables
DETECT_MARKDOWN_TABLE_RGX = re.compile(r'(\|\s*:?--*:?\s*\|)|(\+----*)')
ALPHANUM_RGX = re.compile(r'[a-zA-Z0-9]')
ANY_UPPERCASE_RGX = re.compile(r'[A-Z]')
BZ2_EXT_RGX = re.compile(r'\.(bz2|bzip2)(\-luigi\-tmp\-\d*)?$')
XZ_EXT_RGX = re.compile(r'\.xz(\-luigi\-tmp\-\d*)?$')
GZ_EXT_RGX = re.compile(r'\.(gz|gzip)(\-luigi\-tmp\-\d*)?$')
TXT_TSV_EXT_RGX = re.compile(r'\.(txt|tsv)(\-luigi\-tmp\-\d*)?$')
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for token in tokens:
token = token.rstrip().split("\t")[0].strip()
if not token:
token = "[Occupy]"
vocab[token] = index
index += 1
print("we totally load " + str(len(vocab)) + " tokens")
return vocab
class SPETokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, bpe_model_file, max_len=None):
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
# with open(vocab_file,'r') as f_vocab:
# self.vocab = json.loads(f_vocab.readline().strip())
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.sp_tokenizer = sentencepiece.SentencePieceProcessor()
self.sp_tokenizer.Load(bpe_model_file)
self.max_len = max_len if max_len is not None else int(1e12)
def vocab_size(self):
return len(self.vocab)
def _tokenize(self, text, adversarial=False):
split_tokens = ["<unk>" if x not in self.vocab else x for x in self.sp_tokenizer.EncodeAsPieces(text)]
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
return ids
def convert_token_to_id(self, token):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
ids.append(self.vocab[token])
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
@classmethod
def from_pretrained(cls, pretrained_model_path):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
vocab_file = os.path.join(pretrained_model_path, "vocab.txt")
bpe_file = os.path.join(pretrained_model_path, "sentencepiece.bpe.model")
tokenizer = cls(vocab_file, bpe_file)
return tokenizer
| jessicazhu191/Reddit-Download | util.py | util.py | py | 4,199 | python | en | code | 0 | github-code | 13 |
20474913617 | import logging
import json
import os
import boto3
logger = logging.getLogger("lookup-runner")
logger.setLevel(logging.DEBUG)
def handler(event, context):
job_name = get_job_name(event)
debug_print(f"Job name: {job_name}")
runner = find_runner_for_job(job_name=job_name)
return {
"Arn": runner['arn'],
"Type": runner['type']
}
def get_job_name(event):
return event['detail']['build_name']
def find_runner_for_job(job_name):
pk = f"job#{job_name}"
dynamodb_client = boto3.client('dynamodb')
response = dynamodb_client.query(
TableName=os.environ["RUNNERS_TABLE"],
KeyConditionExpression='pk = :pk',
ExpressionAttributeValues={
':pk': {'S': pk}
}
)
if len(response['Items']) > 0:
runner = {
"type": response['Items'][0]['type']['S']
}
if runner['type'] == 'LAMBDA':
runner['arn'] = response['Items'][0]['arn']['S'],
else:
runner['arn'] = '-'
return runner
# No runner found, return default
return {
"arn": os.environ['DEFAULT_RUNNER'],
"type": "LAMBDA"
}
def debug_print(message):
logger.debug(message)
| JimmyDqv/gitlab-runners-on-aws | AutoScaler/lambdas/lookup/lookup-runner.py | lookup-runner.py | py | 1,224 | python | en | code | 13 | github-code | 13 |
34914414063 | import numpy as np
from patteRNA import rnalib
class Transcript:
def __init__(self, name, seq, obs):
self.name = name
self.seq = seq
self.obs = np.array(obs)
self.T = len(obs)
self.obs_dom = None
self.ref = None
self.alpha = None
self.beta = None
self.c = None
self.B = None
self.gamma = None
self.gamma_gmm_k = None
self.log_B_ratio = None
self.mask_0 = self.obs <= 0
self.mask_nan = np.isnan(self.obs)
self.mask_finite = np.isfinite(self.obs)
self.density = 1-np.sum(self.mask_nan)/self.T
self.valid_sites = dict()
self.nan_sites = dict()
def log_transform(self):
self.mask_finite = np.invert(self.mask_0 | self.mask_nan)
self.obs[self.mask_finite] = np.log(self.obs[self.mask_finite])
self.obs[self.mask_0] = -np.Inf
def find_valid_sites(self, motif):
self.valid_sites[motif] = set()
pairing_table, ups = rnalib.compute_pairing_partners(motif)
m = len(motif)
for i in range(self.T - m + 1):
if rnalib.is_valid_pairing(self.seq[i:i+m], pairing_table):
self.valid_sites[motif].add(i)
return pairing_table, ups
def find_nan_sites(self, length):
self.nan_sites[length] = set()
for i in range(self.T - length + 1):
if np.all(self.mask_nan[i:i+length]):
self.nan_sites[length].add(i)
def compute_log_B_ratios(self):
self.log_B_ratio = np.zeros((2, self.T), dtype=float)
self.log_B_ratio[0, :] = np.log(self.B[0, :] / self.B[1, :])
self.log_B_ratio[1, :] = -1 * self.log_B_ratio[0, :]
def enforce_reference(self, ref):
self.ref = ref
self.B = np.zeros((2, self.T), dtype=float)
self.B[0, :] = 1 - np.array(ref)
self.B[1, :] = np.array(ref)
self.gamma = np.zeros((2, self.T), dtype=float)
self.gamma[0, :] = 1 - np.array(ref)
self.gamma[1, :] = np.array(ref)
| AviranLab/patteRNA | src/patteRNA/Transcript.py | Transcript.py | py | 2,058 | python | en | code | 12 | github-code | 13 |
37784894142 | import cv2
import numpy as np
import time
#Load the camera
time.sleep(3)
cap = cv2.VideoCapture(0)
print("Opening Camera ... ")
for i in range (60):
_,background = cap.read()
background = np.flip(background, axis = 1)
while cap.isOpened() :
ret,frame = cap.read()
if ret ==False :
print("There seems to a problem with the camera")
break
else :
frame = np.flip(frame , axis = 1)
hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV) #converting rgb color space to hsv
#values for custom color
lower = np.array([86,69,27])
upper = np.array([118,162,237])
mask1 = cv2.inRange(hsv,lower,upper)
#Mask erosion and dilation
print("creating magic~!")
kernel =np.ones([3,3],np.uint8)
mask1= cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, kernel)
mask1= cv2.morphologyEx(mask1, cv2.MORPH_DILATE, kernel)
mask2 = cv2.bitwise_not(mask1)
res1 = cv2.bitwise_and(background, background, mask = mask1) #colored part
res2 = cv2.bitwise_and(frame, frame, mask = mask2) #Everything outside the color
final_output = cv2.addWeighted(res1, 1, res2, 1, 0)
#Display output
print("Displaying output..")
cv2.imshow("frame",final_output)
if cv2.waitKey(1) == 27 :
break
cv2.destroyAllWindows()
cap.release()
| abdulahad01/ComputerVision-Projects | invisible cloth.py | invisible cloth.py | py | 1,423 | python | en | code | 5 | github-code | 13 |
15302358436 | #!/usr/bin/python3
from re import I
import numpy as np
import matplotlib.pyplot as plt
import sys
import math
file_list = []
opr_name = ["Insert", "Delete", "Update", "Read"]
rm_cnt = 0
mark_split_file = ""
mark_merge_file = ""
# Processing cmd arg
if len(sys.argv) > 1:
state = 0
for i in range(1, len(sys.argv)):
# Number of removal
if sys.argv[i] == "-r":
state = 1
continue
# Mark special OP# file, s - split, m - merge
if sys.argv[i] == "-s":
state = 2
continue
if sys.argv[i] == "-m":
state = 3
continue
if state == 0:
file_list.append(sys.argv[i])
elif state == 1:
rm_cnt = int(sys.argv[i])
if rm_cnt < 0:
print("Error: #remove must be >= 0")
sys.exit()
state = 0
elif state == 2:
mark_split_file = sys.argv[i]
state = 0
elif state == 3:
mark_merge_file = sys.argv[i]
state = 0
else:
print("Usage: {} input files ... [-r #remove] [-m mark OP# file]".format(sys.argv[0]))
sys.exit()
perf = [[] for i in range(len(opr_name))]
for FILE in file_list:
# read opr.dat
file_opr = open(FILE, "r")
i_perf = []
d_perf = []
r_perf = []
u_perf = []
while True:
nstr = file_opr.readline()
# read until EOF
if len(nstr) == 0:
break
token = nstr.split('\t')
# Operation data
if(token[0][0] == 'i'):
i_perf.append(float(token[3]))
elif(token[0][0] == 'd'):
d_perf.append(float(token[2]))
elif(token[0][0] == 'u'):
u_perf.append(float(token[3]))
elif(token[0][0] == 'r'):
r_perf.append(float(token[3]))
if(len(i_perf) > 0):
perf[0].append(i_perf)
if(len(d_perf) > 0):
perf[1].append(d_perf)
if(len(u_perf) > 0):
perf[2].append(u_perf)
if(len(r_perf) > 0):
perf[3].append(r_perf)
file_opr.close()
# Create marked #OP list
mark_split_list = [[[] for j in range(2)] for i in range(len(file_list))]
if(len(perf[0]) > 0 and mark_split_file != ""):
mfile = open(mark_split_file)
while True:
nstr = mfile.readline()
# read until EOF
if len(nstr) == 0:
break
token = nstr.split('\t')
for i in range(len(file_list)):
mark_split_list[i][0].append(int(token[0]))
mark_split_list[i][1].append(perf[0][i][int(token[0])])
mfile.close()
for i in range(len(file_list)):
print(file_list[i] + ": " + str(sum(mark_split_list[i][1])))
mark_merge_list = [[[] for j in range(2)] for i in range(len(file_list))]
if(len(perf[1]) > 0 and mark_merge_file != ""):
mfile = open(mark_merge_file)
while True:
nstr = mfile.readline()
# read until EOF
if len(nstr) == 0:
break
token = nstr.split('\t')
for i in range(len(file_list)):
mark_merge_list[i][0].append(int(token[0]))
mark_merge_list[i][1].append(perf[1][i][int(token[0])])
mfile.close()
for i in range(len(file_list)):
print(file_list[i] + ": " + str(sum(mark_merge_list[i][1])))
# Remove the highest data
for opr in perf:
if(len(opr) > 0):
if(rm_cnt >= min([len(x) for x in opr])):
print("Error: Number of remove >= data size")
print("Number of remove = " + str(rm_cnt))
print("Data size = " + str(min([len(x) for x in opr])))
sys.exit()
for i in range(rm_cnt):
for j in opr:
j.remove(max(j))
# plot data
for i in range(len(perf)):
dataset = perf[i]
if(len(dataset) > 0):
fig, ax = plt.subplots()
for j in range(len(dataset)):
x = np.arange(0, len(dataset[j]), 1)
y = np.array(dataset[j])
ax.scatter(x, y, label="("+str(j+1)+") "+file_list[j], s=0.3, marker='o')
# Insert
if(i == 0 and mark_split_file != ""):
for j in range(len(dataset)):
x = np.array(mark_split_list[j][0])
y = np.array(mark_split_list[j][1])
ax.scatter(x, y, label=file_list[j]+" - Split", s=0.3, marker='x')
if(i == 1 and mark_merge_file != ""):
for j in range(len(dataset)):
x = np.array(mark_merge_list[j][0])
y = np.array(mark_merge_list[j][1])
ax.scatter(x, y, label=file_list[j]+" - Split", s=0.3, marker='x')
ax.set(xlabel='op#', ylabel='response time (us)', title=opr_name[i]+' Operation Response time')
plt.title(opr_name[i])
plt.figtext(0.01, 0.96, "Overral latency: ", horizontalalignment='left')
for j in range(len(file_list)):
plt.figtext(0.01, 0.96 - 0.03 * (j+1), "(" + str(j+1) + ") " + " - " + str(round(sum(dataset[j]),2)) + "us", horizontalalignment='left')
plt.figtext(0.70, 0.96, "Average latency: ", horizontalalignment='left')
for j in range(len(file_list)):
plt.figtext(0.70, 0.96 - 0.03 * (j+1), "(" + str(j+1) + ") " + " - " + str(round(sum(dataset[j])/len(dataset[j]),2)) + "us", horizontalalignment='left')
plt.legend(loc='upper left')
ax.grid
fig.savefig(opr_name[i]+'.png') | josephly88/B-Tree_on_disk | proc_data/plot.py | plot.py | py | 5,418 | python | en | code | 0 | github-code | 13 |
974288637 | from django.conf import settings
from django.contrib.gis import admin
from django.contrib.gis.admin.widgets import OpenLayersWidget
from django.contrib.gis.admin.widgets import geo_context
from django.contrib.gis.gdal import OGRException
from django.contrib.gis.gdal import OGRGeomType
from django.contrib.gis.geos import GEOSGeometry, GEOSException
from django.template import loader
from ebpub.db.models import Location
from ebpub.db.models import LocationType
from ebpub.db.models import Lookup
from ebpub.db.models import NewsItem
from ebpub.db.models import Schema
from ebpub.db.models import SchemaField
"""
See http://docs.djangoproject.com/en/dev/ref/contrib/gis/admin/
"""
class OBOpenLayersWidget(OpenLayersWidget):
"""
Renders an OpenLayers map using the WKT of the geometry.
OVERRIDING FOR OPENBLOCK: This subclass has patched methods as per
http://code.djangoproject.com/attachment/ticket/9806/9806.3.diff
and we can delete it if/when
http://code.djangoproject.com/ticket/9806 gets fixed.
"""
def render(self, name, value, attrs=None):
# Update the template parameters with any attributes passed in.
if attrs: self.params.update(attrs)
# Defaulting the WKT value to a blank string -- this
# will be tested in the JavaScript and the appropriate
# interface will be constructed.
self.params['wkt'] = ''
# If a string reaches here (via a validation error on another
# field) then just reconstruct the Geometry.
if isinstance(value, basestring):
try:
value = GEOSGeometry(value)
except (GEOSException, ValueError):
value = None
if value and value.geom_type.upper() != self.geom_type and self.geom_type != 'GEOMETRY':
value = None
# Constructing the dictionary of the map options.
self.params['map_options'] = self.map_options()
# Constructing the JavaScript module name using the name of
# the GeometryField (passed in via the `attrs` keyword).
# Use the 'name' attr for the field name (rather than 'field')
self.params['name'] = name
# note: we must switch out dashes for underscores since js
# functions are created using the module variable
js_safe_name = self.params['name'].replace('-','_')
self.params['module'] = 'geodjango_%s' % js_safe_name
if value:
# Transforming the geometry to the projection used on the
# OpenLayers map.
srid = self.params['srid']
if value.srid != srid:
try:
ogr = value.ogr
ogr.transform(srid)
wkt = ogr.wkt
except OGRException:
wkt = ''
else:
wkt = value.wkt
# Setting the parameter WKT with that of the transformed
# geometry.
self.params['wkt'] = wkt
# Check if the field is generic so the proper values are overriden
if self.params['is_unknown']:
self.params['geom_type'] = OGRGeomType(value.geom_type)
if value.geom_type.upper() in ('LINESTRING', 'MULTILINESTRING'):
self.params['is_linestring'] = True
elif value.geom_type.upper() in ('POLYGON', 'MULTIPOLYGON'):
self.params['is_polygon'] = True
elif value.geom_type.upper() in ('POINT', 'MULTIPOINT'):
self.params['is_point'] = True
if value.geom_type.upper() in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION'):
self.params['is_collection']=True
if value.geom_type.upper() == 'GEOMETRYCOLLECTION':
self.params['collection_type'] = 'Any'
else:
self.params['collection_type'] = OGRGeomType(value.geom_type.upper().replace('MULTI', ''))
else:
if self.params['is_unknown']:
# If the geometry is unknown and the value is not set, make it as flexible as possible.
self.params['geom_type'] = OGRGeomType('GEOMETRYCOLLECTION')
self.params['is_collection']=True
self.params['collection_type'] = 'Any'
return loader.render_to_string(self.template, self.params,
context_instance=geo_context)
class OSMModelAdmin(admin.GeoModelAdmin):
# Use GeoModelAdmin to get editable geometries.
# But we'll override a few defaults to use an OpenStreetMap base layer.
default_zoom = 11
openlayers_url = getattr(settings, 'OPENLAYERS_URL', admin.GeoModelAdmin.openlayers_url)
point_zoom = 14
wms_layer = 'openstreetmap'
wms_name = 'OpenStreetMap'
wms_url = 'http://maps.opengeo.org/geowebcache/service/wms'
widget = OBOpenLayersWidget
wms_options = {'format': 'image/png'}
# Upstream patch for geodjango submitted:
# http://code.djangoproject.com/ticket/14886 ... to allow passing
# parameters to the WMS layer constructor. If/when that's fixed,
# we could remove our copy of openlayers.js.
@property
def default_lat(self):
return settings.DEFAULT_MAP_CENTER_LAT
@property
def default_lon(self):
return settings.DEFAULT_MAP_CENTER_LON
def get_map_widget(self, db_field):
"""
Returns a subclass of the OpenLayersWidget (or whatever was specified
in the `widget` attribute) using the settings from the attributes set
in this class.
OVERRIDING FOR OPENBLOCK: This is the patched version of this
method as per
http://code.djangoproject.com/attachment/ticket/9806/9806.3.diff
and we can delete it if/when
http://code.djangoproject.com/ticket/9806 gets fixed.
"""
is_unknown = db_field.geom_type in ('GEOMETRY',)
if not is_unknown:
#If it is not generic, get the parameters from the db_field
is_collection = db_field.geom_type in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION')
if is_collection:
if db_field.geom_type == 'GEOMETRYCOLLECTION': collection_type = 'Any'
else: collection_type = OGRGeomType(db_field.geom_type.upper().replace('MULTI', ''))
else:
collection_type = 'None'
is_linestring = db_field.geom_type in ('LINESTRING', 'MULTILINESTRING')
is_polygon = db_field.geom_type in ('POLYGON', 'MULTIPOLYGON')
is_point = db_field.geom_type in ('POINT', 'MULTIPOINT')
geom_type = OGRGeomType(db_field.geom_type)
else:
#If it is generic, set sensible defaults
is_collection = False
collection_type = 'None'
is_linestring = False
is_polygon = False
is_point = False
geom_type = OGRGeomType('Unknown')
class OLMap(self.widget):
template = self.map_template
geom_type = db_field.geom_type
wms_options = ''
if self.wms_options:
wms_options = ["%s: '%s'" % pair for pair in self.wms_options.items()]
wms_options = ', '.join(wms_options)
wms_options = ', ' + wms_options
params = {'default_lon' : self.default_lon,
'default_lat' : self.default_lat,
'default_zoom' : self.default_zoom,
'display_wkt' : self.debug or self.display_wkt,
'geom_type' : geom_type,
'field_name' : db_field.name,
'is_unknown': is_unknown,
'is_collection' : is_collection,
'scrollable' : self.scrollable,
'layerswitcher' : self.layerswitcher,
'collection_type' : collection_type,
'is_linestring' : is_linestring,
'is_polygon' : is_polygon,
'is_point' : is_point,
'num_zoom' : self.num_zoom,
'max_zoom' : self.max_zoom,
'min_zoom' : self.min_zoom,
'units' : self.units, #likely shoud get from object
'max_resolution' : self.max_resolution,
'max_extent' : self.max_extent,
'modifiable' : self.modifiable,
'mouse_position' : self.mouse_position,
'scale_text' : self.scale_text,
'map_width' : self.map_width,
'map_height' : self.map_height,
'point_zoom' : self.point_zoom,
'srid' : self.map_srid,
'display_srid' : self.display_srid,
'wms_url' : self.wms_url,
'wms_layer' : self.wms_layer,
'wms_name' : self.wms_name,
'wms_options': wms_options,
'debug' : self.debug,
}
return OLMap
class NewsItemAdmin(OSMModelAdmin):
list_display = ('title', 'schema', 'item_date', 'pub_date', 'location_name')
list_filter = ('schema',)
class LocationAdmin(OSMModelAdmin):
pass
class LookupAdmin(admin.ModelAdmin):
list_display = ('name', 'code', 'slug', 'schema_field')
list_filter = ('schema_field',)
admin.site.register(Schema)
admin.site.register(SchemaField)
admin.site.register(NewsItem, NewsItemAdmin)
admin.site.register(LocationType)
admin.site.register(Location, LocationAdmin)
admin.site.register(Lookup, LookupAdmin)
# Datamodel spike hacks - XXX de-hardcode these
from ebpub.db.models import RestaurantInspection
class RestaurantInspectionAdmin(NewsItemAdmin):
# Not the best UI if there are a ton of Lookups, but
# normally there probably aren't and it works OK.
filter_horizontal = ('violation', 'result')
admin.site.register(RestaurantInspection, RestaurantInspectionAdmin)
| christaggart/openblock | ebpub/ebpub/db/admin.py | admin.py | py | 10,191 | python | en | code | null | github-code | 13 |
12410379593 |
#Write a program that finds the summation of every number from 1 to num. The number will always be a positive integer greater than 0.
#For example:
#summation(2) -> 3 1 + 2
#summation(8) -> 36 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8
def summation(num):
number = 0
for i in range(1, num+1):
# Learned: to create a range from the start to num + 1
number += i
return(number)
summation(8)
| KaiaWalters/code-wars | Python/grasshopperSummation.py | grasshopperSummation.py | py | 397 | python | en | code | 1 | github-code | 13 |
74176776979 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 10 10:39:04 2017
@author: gregz
"""
from args import parse_args
import os.path as op
from astropy.io import fits
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import datetime
import warnings
import sys
import traceback
from fiber import Fiber
from fiber_utils import get_indices
from scipy.signal import medfilt
from utils import biweight_location
# Configure matplotlib text rendering: register Meiryo as the preferred
# sans-serif face and make sans-serif the default family for all figures.
matplotlib.rcParams['font.sans-serif'] = "Meiryo"
matplotlib.rcParams['font.family'] = "sans-serif"
plt.style.use('seaborn-colorblind')
def execute_function(obj, call, kwargs=None):
    """Invoke ``obj.<call>(**kwargs)`` with warnings suppressed, logging failures.

    Parameters
    ----------
    obj : object
        Object exposing the method named by ``call`` plus ``log`` (a logger
        with an ``error`` method) and ``basename`` attributes.
    call : str
        Name of the method to invoke.
    kwargs : dict, optional
        Keyword arguments forwarded to the method.
    """
    # Avoid the original mutable-default-argument idiom (kwargs={}).
    if kwargs is None:
        kwargs = {}
    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # getattr stays inside the try so a missing method is logged
            # rather than raised, matching the original behavior.
            func = getattr(obj, call)
            func(**kwargs)
    except Exception:
        # Catch Exception rather than a bare except so SystemExit and
        # KeyboardInterrupt still propagate.  Log the formatted traceback:
        # the original logged sys.exc_info()[0] (only the class) and the
        # None return value of traceback.print_exc().
        obj.log.error('Error occured while running %s on %s'
                      % (call, obj.basename))
        obj.log.error(traceback.format_exc())
def get_multi_filename(amp):
    """Return the path of the 'multi_*' reduced product for this amplifier."""
    basename = 'multi_%s_%s_%s_%s.fits' % (amp.specid, amp.ifuslot,
                                           amp.ifuid, amp.amp)
    return op.join(amp.path, basename)
def get_fits(amp):
    """Open the multi-extension FITS product for *amp*.

    Returns
    -------
    astropy.io.fits.HDUList or None
        The opened HDU list, or ``None`` when the file is missing or
        unreadable.  The ``None`` return is a deliberate best-effort
        signal that the amp has no usable reduced product yet.
    """
    fn = get_multi_filename(amp)
    try:
        return fits.open(fn)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        return None
def write_fits(F):
    """Write the HDU list *F* back to its own file, replacing any existing copy.

    Newer astropy spells the replace flag ``overwrite``; older releases
    raise TypeError for that keyword and expect ``clobber`` instead.
    """
    try:
        F.writeto(F.filename(), overwrite=True)
    except TypeError:
        # Fall back for old astropy versions.
        F.writeto(F.filename(), clobber=True)
def build_sky_list(amps):
    """Return the amps whose exposure time marks them as sky frames.

    A sky frame has a strictly positive exposure time of at most 900 s.
    """
    sky_frames = []
    for frame in amps:
        if 1e-8 < frame.exptime <= 900.:
            sky_frames.append(frame)
    return sky_frames
def build_sci_list(amps):
    """Return the amps whose exposure time (> 900 s) marks them as science frames."""
    sci_frames = []
    for frame in amps:
        if frame.exptime > 900.:
            sci_frames.append(frame)
    return sci_frames
def reduce_twi(twi, args):
    """Run the full twilight-calibration chain on *twi*.

    Skips work (returning False) when the multi-extension product already
    exists and ``args.rerun_twi`` is False; returns True after reducing.
    """
    if op.exists(get_multi_filename(twi)) and args.rerun_twi is False:
        return False
    # Calibration steps, in order: image prep, trace, fiber model,
    # wavelength solution, fiber-to-fiber normalization.
    calibration_steps = ['prepare_image', 'get_trace', 'get_fibermodel',
                         'get_wavelength_solution', 'get_fiber_to_fiber']
    for step in calibration_steps:
        execute_function(twi, step)
    execute_function(twi, 'save_fibmodel')
    save_kwargs = {'image_list': ['image', 'error'],
                   'spec_list': ['trace', 'wavelength', 'spectrum',
                                 'fiber_to_fiber', 'dead']}
    execute_function(twi, 'save', save_kwargs)
    clean_amp(twi)
    return True
def reduce_sci(sci, args, calpath):
    """Reduce a science frame, pulling calibration products from *calpath*.

    Returns False (without work) when the multi-extension product exists
    and ``args.rerun_sci`` is False.
    """
    if op.exists(get_multi_filename(sci)) and args.rerun_sci is False:
        return False
    sci.calpath = calpath
    sci.check_fibermodel = False
    sci.check_wave = False
    # Re-fit the trace on the science frame itself ...
    sci.refit = True
    execute_function(sci, 'prepare_image')
    execute_function(sci, 'get_trace')
    sci.refit = False
    # ... then load the remaining calibration products from calpath.
    execute_function(sci, 'load', {'path': 'calpath',
                                   'spec_list': ['wavelength', 'spectrum',
                                                 'fiber_to_fiber', 'dead']})
    execute_function(sci, 'fiberextract')
    execute_function(sci, 'save',
                     {'image_list': ['image', 'error'],
                      'spec_list': ['spectrum', 'wavelength', 'trace',
                                    'fiber_to_fiber']})
    clean_amp(sci)
def get_datetime(amp):
    """Parse the timestamp encoded in the first 15 characters of the filename.

    Layout is ``YYYYMMDD?HHMMSS`` where character 8 is a separator
    (e.g. 'T') that is skipped.
    """
    stamp = op.basename(amp.filename)
    return datetime.datetime(int(stamp[0:4]), int(stamp[4:6]), int(stamp[6:8]),
                             int(stamp[9:11]), int(stamp[11:13]),
                             int(stamp[13:15]))
def string_from_datetime(d):
    """Format *d* as a zero-padded 'YYYY-MM-DD HH:MM:SS' string."""
    date_part = '%04d-%02d-%02d' % (d.year, d.month, d.day)
    time_part = '%02d:%02d:%02d' % (d.hour, d.minute, d.second)
    return date_part + ' ' + time_part
def get_master_sky(wavelength, spectrum, fiber_to_fiber, path, exptime):
    """Build a median-filtered master sky spectrum and write it to disk.

    All fibers are flat-field-corrected, merged into a single wavelength-
    sorted spectrum, median filtered (kernel 281) to suppress lines from
    individual fibers, resampled onto a uniform 0.05 AA grid, scaled to
    counts per second, and saved as ``<path>/sky_model.txt``.
    """
    wave_all = []
    spec_all = []
    for wave, spec, ftf in zip(wavelength, spectrum, fiber_to_fiber):
        wave_all.append(wave)
        # Guard against zero/near-zero fiber-to-fiber values.
        spec_all.append(np.where(ftf > 1e-8, spec / ftf, 0.0))
    wave_all = np.hstack(wave_all)
    spec_all = np.hstack(spec_all)
    order = np.argsort(wave_all)
    wave_all = wave_all[order]
    spec_all = spec_all[order]
    master_sky = medfilt(spec_all, 281)
    grid = np.arange(wave_all.min(), wave_all.max() + 0.05, 0.05)
    out = np.zeros((len(grid), 2))
    out[:, 0] = grid
    out[:, 1] = np.interp(grid, wave_all, master_sky / exptime)
    np.savetxt(op.join(path, 'sky_model.txt'), out)
def reduce_sky(sky, args, trace_list, calpath):
    """Reduce a sky frame and write its 'sky_model.txt'.

    If the reduced multi-extension file already exists (and rerun is off),
    only rebuild the master sky model from the saved products.  Otherwise
    run the reduction: prepare the image, interpolate a trace from nearby
    frames, load calibrations from ``calpath``, extract fibers, save, and
    derive the master sky.
    """
    if op.exists(get_multi_filename(sky)) and args.rerun_sky is False:
        # Fast path: products on disk, just regenerate the sky model file.
        F = get_fits(sky)
        sky.log.info('Getting master sky for %s' %sky.basename)
        return get_master_sky(F['wavelength'].data, F['spectrum'].data,
                              F['fiber_to_fiber'].data, sky.path, sky.exptime)
    sky.calpath = calpath
    sky.check_fibermodel = False
    sky.check_wave = False
    operations = ['prepare_image']
    for operation in operations:
        execute_function(sky, operation)
    # Sky frames get their trace interpolated from neighboring frames
    # rather than refit (too little signal to trace directly).
    get_trace(sky, trace_list)
    execute_function(sky, 'load', {'path':'calpath',
                                   'spec_list':['wavelength', 'spectrum',
                                                'fiber_to_fiber', 'dead']})
    image_list = ['image','error']
    execute_function(sky, 'fiberextract')
    spec_list = ['spectrum', 'wavelength', 'trace', 'fiber_to_fiber']
    execute_function(sky, 'save', {'image_list': image_list,
                                   'spec_list': spec_list})
    # Build the master sky (amp method populates masterwave/mastersky),
    # then resample to a uniform 0.05-step grid per second of exposure.
    sky.get_master_sky(sky=True)
    wv = np.arange(sky.masterwave.min(),sky.masterwave.max()+0.05,0.05)
    s = np.zeros((len(wv),2))
    s[:,0] = wv
    s[:,1] = np.interp(wv, sky.masterwave, sky.mastersky / sky.exptime)
    np.savetxt(op.join(sky.path, 'sky_model.txt'), s)
    clean_amp(sky)
def get_interp_weights(amp_in, amp_list):
    """Compute time-interpolation weights of ``amp_list`` members at ``amp_in``.

    Each amp in ``amp_list`` gets a weight from linearly interpolating (in
    time) an indicator function to the epoch of ``amp_in``; amps bracketing
    that epoch receive nonzero weights summing to 1.

    Returns
    -------
    (w, order) : (ndarray, ndarray)
        ``w[k]`` is the weight for ``amp_list[order[k]]``.
    """
    timediff = np.zeros((len(amp_list),))
    for i,amp in enumerate(amp_list):
        dt = get_datetime(amp)
        ds = get_datetime(amp_in)
        td = dt - ds
        timediff[i] = td.days * 86400. + td.seconds
    # BUG FIX: the original sorted timediff first and then took
    # np.argsort of the *sorted* array, which is always the identity
    # permutation -- so weights were mis-paired with amps whenever
    # amp_list was not already time-ordered.  Capture the permutation
    # before sorting instead.
    order = np.argsort(timediff)
    timediff = timediff[order]
    f = np.zeros(timediff.shape)
    w = 1. * f
    for i in np.arange(timediff.shape[0]):
        # Interpolate a one-hot indicator at t=0 (np.interp needs the
        # ascending timediff produced above).
        f[i] = 1.
        w[i] = np.interp(0., timediff, f)
        f[i] = 0.
    return w, order
def get_trace(sky, trace_list):
    """Set the trace of ``sky`` from a time-weighted blend of nearby frames."""
    sky.log.info('Getting trace from other frames near in time.')
    weights, order = get_interp_weights(sky, trace_list)
    contributions = [weights[k] * get_fits(trace_list[order[k]])['trace'].data
                     for k in range(len(weights)) if weights[k] > 0.0]
    set_trace(sky, np.sum(contributions, axis=(0,)))
def set_trace(sky, array):
    """Install a (nfibers, nx) trace ``array`` onto ``sky``'s Fiber objects.

    Creates Fiber instances as needed, assigns each row of ``array`` as
    that fiber's trace, and refreshes the per-fiber pixel indices.
    """
    sky.log.info('Setting trace from other frames near in time.')
    try:
        a = array.shape[0]
    # BUG FIX: the original caught only KeyError, which array.shape[0]
    # never raises -- a 0-d result (e.g. from summing an empty list)
    # raises IndexError, and a non-array raises AttributeError.  Keep
    # KeyError for safety and add the exceptions that actually occur.
    except (KeyError, IndexError, AttributeError):
        # NOTE(review): message mentions "extension", presumably copied
        # from FITS-reading code -- confirm wording with maintainers.
        sky.log.error('Failed to open extension %s for %s' %('trace', sky.basename))
        return None
    for j in np.arange(a):
        try:
            f = sky.fibers[j]
        except IndexError:
            # Fiber numbering is 1-based (j+1).
            f = Fiber(sky.D, j+1, sky.path, sky.filename)
            sky.fibers.append(f)
        sky.fibers[j].trace = array[j]
    get_indices(sky.image, sky.fibers, sky.fsize)
def clean_amp(amp):
    """Release the large per-amp arrays so their memory can be reclaimed."""
    for attr in ('image', 'back', 'clean_image', 'continuum_sub', 'residual',
                 'error', 'sig', 'sigwave', 'error_analysis', 'fibers'):
        setattr(amp, attr, None)
def subtract_sky_from_sci(sci, sky_list, sky_model_list, wave, use_sci=False):
    """Subtract a sky model from a science frame and save the result.

    Two modes: with ``use_sci`` True the sky model is rebuilt from the
    science frame itself (minus a reference object spectrum); otherwise
    the model is a time-weighted blend of the provided sky frames.  The
    sky-subtracted spectra are appended to the frame's FITS file as a
    'sky_subtracted' extension (replacing any previous one).

    Returns (wavelengths, sky_subtracted, model_wave, sky_model).
    """
    sci.log.info('Subtracting sky from %s' %sci.basename)
    if use_sci:
        fn = op.join(sci.path, 'sky_model.txt')
        # NOTE(review): 'if True' with the real condition commented out
        # looks like a debugging leftover -- the model is rebuilt every
        # call regardless of whether sky_model.txt exists.  Confirm
        # before restoring the op.exists(fn) check.
        if True:#not op.exists(fn):
            # NOTE(review): hard-coded reference spectrum (Leo I, 2013-12-09);
            # presumably the target's own spectrum to remove before the
            # sky fit -- verify path and applicability for other targets.
            F = fits.open('panacea/leoI_20131209.fits')
            G = get_fits(sci)
            get_master_sky(G['wavelength'].data,
                           G['spectrum'].data - F[0].data*sci.exptime,
                           G['fiber_to_fiber'].data, sci.path, sci.exptime)
        wave, sky_model = np.loadtxt(fn, unpack=True)
    else:
        # Blend the precomputed sky models with time-interpolation weights.
        weight, sorted_ind = get_interp_weights(sci, sky_list)
        arr_list = []
        for i,w in enumerate(weight):
            if w > 0.0:
                arr_list.append(w * sky_model_list[sorted_ind[i]])
        sky_model = np.sum(arr_list, axis=(0,))
    SCI = get_fits(sci)
    sky_subtracted = np.zeros(SCI['spectrum'].data.shape)
    for i, spec in enumerate(SCI['spectrum'].data):
        # NOTE(review): as parenthesized, the interpolated sky is scaled by
        # fiber_to_fiber *before* subtraction, i.e. spec - (interp * ftf);
        # confirm the intent was not (spec - interp) * ftf.
        sky_subtracted[i,:] = (spec - np.interp(SCI['wavelength'].data[i,:],
                                                wave, sky_model*sci.exptime)
                               * SCI['fiber_to_fiber'].data[i,:])
    s = fits.ImageHDU(sky_subtracted)
    # Remove any stale 'sky_subtracted' extensions before appending the
    # new one (delete back-to-front so indices stay valid).
    erase = []
    for i,S in enumerate(SCI):
        if S.header['EXTNAME'] == 'sky_subtracted':
            erase.append(i)
    for i in sorted(erase,reverse=True):
        del SCI[i]
    SCI.append(s)
    SCI[-1].header['EXTNAME'] = 'sky_subtracted'
    write_fits(SCI)
    return SCI['wavelength'].data, sky_subtracted, wave, sky_model
def make_collapsed_cube(sci, ifucen, ext='sky_subtracted',
                        scale=1.0, seeing=2.0, wlow=5150,
                        whigh=5250):
    """Collapse fiber spectra over [wlow, whigh] onto a 2D spatial image.

    Each fiber is reduced to a robust (biweight) average over the chosen
    wavelength window, then Gaussian-weighted (sigma ~= ``seeing``) onto a
    regular grid of pixel size ``scale`` built from the IFU fiber positions
    in ``ifucen``.  The image is appended to the frame's FITS file as a
    'collapsed' extension with a simple WCS-like header.
    """
    F = get_fits(sci)
    data = np.zeros((F[ext].data.shape[0],))
    for i, v in enumerate(data):
        xl = np.searchsorted(F['wavelength'].data[i,:],wlow,side='left')
        xh = np.searchsorted(F['wavelength'].data[i,:],whigh,side='right')
        data[i] = biweight_location(F[ext].data[i,xl:xh])
    x = np.arange(ifucen[:,0].min()-scale,
                  ifucen[:,0].max()+scale, scale)
    y = np.arange(ifucen[:,1].min()-scale,
                  ifucen[:,1].max()+scale, scale)
    xgrid, ygrid = np.meshgrid(x, y)
    d = np.zeros((len(ifucen[:,1]),)+xgrid.shape)
    w = np.zeros((len(ifucen[:,1]),)+xgrid.shape)
    # BUG FIX: xrange is Python-2 only; range behaves identically here
    # and keeps the module importable under Python 3.
    for i in range(len(x)):
        for j in range(len(y)):
            d[:,j,i]= np.sqrt((ifucen[:,0] - xgrid[j,i])**2 +
                              (ifucen[:,1] - ygrid[j,i])**2)
            w[:,j,i] = np.exp(-1./2.*(d[:,j,i]/seeing)**2)
    ws = w.sum(axis=0)
    zgrid = (data[:,np.newaxis,np.newaxis]*w).sum(axis=0)/ws
    hdu = fits.PrimaryHDU(np.array(zgrid, dtype='float32'))
    hdu.header['CRPIX1'] = len(x)/2.
    hdu.header['CRPIX2'] = len(y)/2.
    hdu.header['CRVAL1'] = 0.
    hdu.header['CRVAL2'] = 0.
    hdu.header['CDELT1'] = scale
    hdu.header['CDELT2'] = scale
    # Replace any existing 'collapsed' extension (delete back-to-front).
    erase = []
    for i,S in enumerate(F):
        if S.header['EXTNAME'] == 'collapsed':
            erase.append(i)
    for i in sorted(erase,reverse=True):
        del F[i]
    F.append(hdu)
    F[-1].header['EXTNAME'] = 'collapsed'
    write_fits(F)
def main(argv=None):
    """Run the full twi/sci/sky reduction and produce diagnostic plots.

    Reduces twilight calibrations, then science frames, builds sky models
    (from dedicated sky frames or from the science frames themselves),
    subtracts sky, collapses cubes, and saves sky/science spectrum plots.
    """
    args = parse_args(argv)
    # Fiber positions from the IFUcen file for the 'R' channel.
    ifucen = np.loadtxt(op.join(args.kwargs['virusconfig'], 'IFUcen_files',
                                args.ifucen_fn['R'][0]),
                        usecols=[0,1,2], skiprows=args.ifucen_fn['R'][1])
    # Hard-coded run options (override any command-line values).
    args.rerun_twi = False
    args.rerun_sci = False
    args.rerun_sky = False
    plot_sky = True
    plot_sci = True
    use_sci = True # first subtract from sky then from sci model
    sky_list = build_sky_list(args.sci_list)
    sci_list = build_sci_list(args.sci_list)
    for twi in args.twi_list:
        response = reduce_twi(twi, args)
    for sci in sci_list:
        reduce_sci(sci, args, args.twi_list[0].path)
    # Frames usable as trace donors: all science frames plus the first twi.
    trace_list = [sci for sci in sci_list]
    trace_list.insert(0,args.twi_list[0])
    sky_spec_list = []
    if plot_sky:
        wlow = 4800
        whigh = 4900
        fig, ax = plt.subplots(1, figsize=(8,6))
    for sky in sky_list:
        # Reuse a saved sky model when present, otherwise reduce the frame.
        fn = op.join(sky.path, 'sky_model.txt')
        if op.exists(fn):
            w, s = np.loadtxt(fn, unpack=True)
        else:
            reduce_sky(sky, args, trace_list, args.twi_list[0].path)
            w, s = np.loadtxt(op.join(sky.path, 'sky_model.txt'), unpack=True)
        sky_spec_list.append(s)
        if plot_sky:
            xl = np.searchsorted(w, wlow)
            xh = np.searchsorted(w, whigh, side='right')
            ax.plot(w[xl:xh], s[xl:xh],
                    label='sky:'+string_from_datetime(get_datetime(sky)))
    # Common wavelength grid passed to the sky subtraction.
    w = np.arange(4700., 5510., 0.05)
    if plot_sci:
        wlow = 4800
        whigh = 4900
        fig1, ax1 = plt.subplots(1, figsize=(8,6))
        # Single representative fiber to plot.
        fibers = [76]
    for sci in sci_list:
        wave, skys, wvs, skysp = subtract_sky_from_sci(sci, sky_list, sky_spec_list, w,
                                                       use_sci=use_sci)
        make_collapsed_cube(sci, ifucen[:,1:3][::-1,:])
        if plot_sci:
            for fiber in fibers:
                xl = np.searchsorted(wave[fiber,:], wlow)
                xh = np.searchsorted(wave[fiber,:], whigh, side='right')
                fstr = '%03d:' %fiber
                ax1.plot(wave[fiber,xl:xh], skys[fiber,xl:xh]/sci.exptime,
                         label=fstr+string_from_datetime(get_datetime(sci)))
        if plot_sky:
            xl = np.searchsorted(wvs, wlow)
            xh = np.searchsorted(wvs, whigh, side='right')
            ax.plot(wvs[xl:xh], skysp[xl:xh],
                    label='sci:'+string_from_datetime(get_datetime(sci)))
    if plot_sci:
        ax1.set_xlim([wlow,whigh])
        ax1.set_ylim([0.00, 0.1])
        ax1.legend(loc='best', fancybox=True, framealpha=0.5)
        ax1.tick_params(labelsize=10)
        ax1.set_ylabel(r'Sci Brightness (e-/s)', fontsize=12)
        ax1.set_xlabel(r'Wavelength ($\AA$)', fontsize=12)
        fig1.savefig('sci_spec_%s.png' %args.twi_list[0].date.date(), dpi=150)
    if plot_sky:
        ax.set_xlim([wlow,whigh])
        ax.set_ylim([0.00, 0.05])
        ax.legend(loc='best', fancybox=True, framealpha=0.5)
        ax.tick_params(labelsize=10)
        ax.set_ylabel(r'Sky Brightness (e-/s)', fontsize=12)
        ax.set_xlabel(r'Wavelength ($\AA$)', fontsize=12)
        fig.savefig('sky_spec_%s.png' %args.twi_list[0].date.date(), dpi=150)
# Script entry point: run with default (command-line) arguments.
if __name__ == '__main__':
    main()
| grzeimann/Panacea | virusw_special_reduction.py | virusw_special_reduction.py | py | 14,173 | python | en | code | 8 | github-code | 13 |
71918189459 | """
025. 保龄球
小明到保龄球馆打保龄球,一次十局。若一到八局都零分,剩下最后两局。
保龄球打球规则为:
(1) 每一局有十瓶保龄球瓶。
(2) 若某局第一球没有全部打倒十瓶保龄球瓶,可再打第二球。
(3) 若某局第一球打倒全部十瓶保龄球瓶,此局只打一球。
(4) 若第十局打倒全部十瓶保龄球瓶,此局可以打三球。
保龄球每一局计分规则为:
(1) 两球打倒保龄球瓶少于十瓶,每一瓶得一分。
例如两球打倒 7 瓶、2瓶,计为 7 2。
此局分数计为 7+2 = 9。
(2) 第一球打倒保龄球瓶少于十瓶,第二球将剩余球瓶均打倒 (spare),
每一瓶得一分,并加计后面一球打倒瓶数。
例如两球打倒 7 瓶、3 瓶,下一球打倒 5 瓶,计为 7 3。
若此局为第十局,计为 7 3 5。
此局分数为 7 + 3 + 5。
(3) 第一球打倒保龄球瓶等于十瓶,不用打第二球 (strike)。
每一瓶得一分,并加计后面两球打倒的瓶数。
        例如本局第一球打倒 10 瓶,后面两球打倒 5、0 瓶,本局计为 10。
若此局为第十局,计为 10 5 0。
此局分数为 10 + 5 + 0。
例如本局打倒 10 瓶,后面两球打倒 10、10 瓶,计为 10。
若此局为第十局,计为 10 10 10。
此局分数为 10 + 10 + 10。
试算出总得分
测试案例(Test Case)资料:
Input:
2 5
7 1
Output:
15
---------------
Input:
5 5
10 8 0
Output:
38
---------------
Input:
10
10 8 2
Output:
48
---------------
Input:
8 2
7 3 8
Output:
35
"""
def main():
    """Read the ninth- and tenth-frame pin counts and print the total score."""
    ninth = [int(v) for v in input().split()]   # ninth frame pin counts
    tenth = [int(v) for v in input().split()]   # tenth frame pin counts
    print('%d' % compute(ninth, tenth))         # total score
def compute(ninth, tenth):
    """Score the last two frames of a bowling game.

    ninth: pin counts of the ninth frame (one ball if a strike, else two).
    tenth: pin counts of the tenth frame (three balls after a strike/spare,
           otherwise two).
    Returns the combined score of both frames, applying the bonus rules:
    a strike in the ninth adds the next two balls, a spare adds the next
    ball.  (The original kept unused locals ``n`` and ``i``; removed.)
    """
    if ninth[0] == 10:            # strike: bonus is the next two balls
        result = ninth[0] + sum(tenth[:2])
    elif sum(ninth) == 10:        # spare: bonus is the next ball
        result = sum(ninth) + tenth[0]
    else:                         # open frame: plain pin count
        result = sum(ninth)
    # Tenth frame always counts every ball thrown.
    return result + sum(tenth)
main() | guyleaf/python | homework/025. 保齡球/test25.py | test25.py | py | 2,329 | python | zh | code | 1 | github-code | 13 |
25523992503 | import tkinter as tk
from initialize import initialize_database
from ui.login_view import LoginView
from ui.register_view import RegisterView
from ui.game_view import TicTacToeGrid
from game import TicTacToeGame
from services.service import UserService
class UI:
    """Top-level controller for the tic-tac-toe application's views."""

    def __init__(self, root):
        """Initialize the user interface.

        Args:
            root: the root Tk widget
        """
        self.root = root
        self.current_view = None
        self.user_service = UserService()
        self.initialize_ui()

    def initialize_ui(self):
        """Set the window title, prepare the database, and show login."""
        self.root.title("Ristinolla")
        initialize_database()
        self.show_login_view()

    def show_login_view(self):
        """Replace the current view with the login view."""
        self.hide_current_view()
        self.current_view = LoginView(
            self.root,
            self.show_register_view,
            self.start_game
        )
        self.current_view.pack()

    def show_register_view(self):
        """Replace the current view with the registration view."""
        self.hide_current_view()
        self.current_view = RegisterView(
            self.root,
            self.show_login_view
        )
        self.current_view.pack()

    # Could not get this method working with the course material's
    # approach, so ChatGPT was used as an aid.
    def hide_current_view(self):
        """Destroy every child widget of the root to clear the window."""
        for widget in self.root.winfo_children():
            widget.destroy()
    # End of the ChatGPT-assisted code.

    def start_game(self):
        """Clear the window and start a new tic-tac-toe game."""
        self.hide_current_view()
        game = TicTacToeGame()
        self.current_view = TicTacToeGrid(game)
if __name__ == "__main__":
root = tk.Tk()
app = UI(root)
root.mainloop()
| xcvbnmas/ot-harjoitustyo | src/main.py | main.py | py | 1,750 | python | fi | code | 0 | github-code | 13 |
17119948303 | import pygame as pg
from dataclasses import dataclass
import random as rnd
# Board geometry: a square window of `resolution` pixels split into a
# grid x grid board; each cell is `size` pixels on a side.
resolution = 400
grid = 20
size = resolution // grid
mine_count = 20
pg.init()
screen = pg.display.set_mode([resolution, resolution])
# Cell sprites, scaled to the cell size: unrevealed, flagged, mine, and
# one image per neighbor-mine count 0-8.
cell_normal = pg.transform.scale(
    pg.image.load("Teil_10_ms_cell_normal.gif"), (size, size)
)
cell_marked = pg.transform.scale(
    pg.image.load("Teil_10_ms_cell_marked.gif"), (size, size)
)
cell_mine = pg.transform.scale(pg.image.load("Teil_10_ms_cell_mine.gif"), (size, size))
cell_selected = []
for n in range(9):
    cell_selected.append(
        pg.transform.scale(pg.image.load(f"Teil_10_ms_cell_{n}.gif"), (size, size))
    )
# Flat row-major list of Cell objects (filled below) and the 8 relative
# offsets of a cell's neighbors.
matrix = []
next_cells = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]
@dataclass
class Cell:
    """One board cell: position, mine/selection/flag state, neighbor count."""
    row: int
    column: int
    mine: bool = False
    selected: bool = False
    flagged: bool = False
    # BUG FIX: the original read `next_to = int = 0`, a chained assignment
    # that set a class attribute literally named `int` (shadowing the
    # builtin in the class body) instead of declaring the intended
    # annotated dataclass field.
    next_to: int = 0

    def show(self):
        """Blit the sprite matching this cell's state at its board position."""
        pos = (self.column * size, self.row * size)
        if self.selected:
            if self.mine:
                screen.blit(cell_mine, pos)
            else:
                screen.blit(cell_selected[self.next_to], pos)
        else:
            if self.flagged:
                screen.blit(cell_marked, pos)
            else:
                screen.blit(cell_normal, pos)

    def count_mines_next(self):
        """Count mines among the 8 neighbors into self.next_to."""
        for pos in next_cells:
            new_row = self.row + pos[0]
            new_column = self.column + pos[1]
            if (
                new_row >= 0
                and new_row < grid
                and new_column >= 0
                and new_column < grid
            ):
                if matrix[new_row * grid + new_column].mine:
                    self.next_to += 1
def floodfill(row, column):
    """Recursively reveal the connected zero-neighbor region around a cell.

    Every in-bounds neighbor is revealed; neighbors that themselves have
    no adjacent mines (and were not yet revealed) recurse further.
    """
    for d_row, d_col in next_cells:
        r = row + d_row
        c = column + d_col
        if 0 <= r < grid and 0 <= c < grid:
            neighbour = matrix[r * grid + c]
            if neighbour.next_to == 0 and not neighbour.selected:
                neighbour.selected = True
                floodfill(r, c)
            else:
                neighbour.selected = True
# Build the board as a flat row-major list of cells.
for n in range(grid * grid):
    matrix.append(Cell(n // grid, n % grid))
# Place mines at distinct random cells until mine_count are placed.
while mine_count > 0:
    x = rnd.randrange(grid * grid)
    if not matrix[x].mine:
        matrix[x].mine = True
        mine_count -= 1
# Precompute each cell's neighboring-mine count.
for object in matrix:
    object.count_mines_next()
go_on = True
# Main event/render loop.
while go_on:
    for event in pg.event.get():
        if event.type == pg.QUIT:
            go_on = False
        if event.type == pg.MOUSEBUTTONDOWN:
            # Map pixel coordinates to a board cell.
            mouseX, mouseY = pg.mouse.get_pos()
            column = mouseX // size
            row = mouseY // size
            i = row * grid + column
            cell = matrix[i]
            # Right click toggles the flag.
            if pg.mouse.get_pressed()[2]:
                cell.flagged = not cell.flagged
            # Left click reveals; empty cells flood-fill, a mine ends the
            # game by revealing the whole board.
            if pg.mouse.get_pressed()[0]:
                cell.selected = True
                if cell.next_to == 0 and not cell.mine:
                    floodfill(row, column)
                if cell.mine:
                    for object in matrix:
                        object.selected = True
    # Redraw every cell and present the frame.
    for object in matrix:
        object.show()
    pg.display.flip()
pg.quit()
| ReturntoSender/python | minesweeper/minesweeper.py | minesweeper.py | py | 3,264 | python | en | code | 0 | github-code | 13 |
31687855816 | from django.db import models
from django.core.files.storage import FileSystemStorage
from django.contrib.auth.models import User
fs = FileSystemStorage(location='/media/hushvids')
class Show(models.Model):
    """
    Represents a show (a series that groups Episode rows via ForeignKey).
    """
    title = models.CharField(max_length=255)
class Episode(models.Model):
    """
    A single episode video belonging to a Show.
    """
    filename = models.CharField(max_length=255)
    video = models.FileField(storage=fs)
    # on_delete=CASCADE: deleting a Show also deletes all of its episodes.
    # Revisit this argument if the Show model's lifecycle rules change.
    show = models.ForeignKey(Show, on_delete=models.CASCADE)
class Movie(models.Model):
    """
    Represents a standalone movie (title plus stored video file).
    """
    title = models.CharField(max_length=255)
    video = models.FileField(storage=fs)
class Post(models.Model):
    """
    Used to store posts to the home/announcements page.
    """
    title = models.CharField(max_length=255)
    # Set automatically when the post is first created.
    date = models.DateTimeField(auto_now_add=True)
    # BUG FIX: on_delete is required on ForeignKey since Django 2.0.
    # CASCADE matches the pre-2.0 implicit default, so a user's posts are
    # removed with the user.
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    text = models.TextField()
| zlandry13/hush | hushstream/video/models.py | models.py | py | 1,068 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.