index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
987,800 | 53a91d72136a390e6a178b30e8e87966c4097238 | #this function returns the nth triangular number.1,3,6,10,15,21 etc.
def triangularnum(n=None):
    """Return the nth triangular number (1, 3, 6, 10, 15, 21, ...).

    n: which triangular number to compute.  When None (the default,
       preserving the original zero-argument behaviour) the value is
       read interactively from stdin.
    Returns a (triangular_number, n) tuple — the same shape as the
    original's (tri, c-1), whose loop counter c-1 always equalled n.
    """
    if n is None:
        n = int(input("Which triangular number do you need?: "))
    # Closed form n*(n+1)//2 replaces the original O(n) summation loop
    # and the redundant iteration counter.
    tri = n * (n + 1) // 2
    return (tri, n)
# Prompt for n, then report the nth triangular number.
num, count = triangularnum()
print("\nThe {}th triangular number is {}.".format(count, num))
|
987,801 | e5152573b78fee12cd5935f02dd2bb8b65a731ec | import discord
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
import asyncio
import colorsys
import random
import platform
from discord import Game, Embed, Color, Status, ChannelType
from discord import Spotify
import os
import functools
import time
import datetime
import requests
import json
import aiohttp
async def status_task():
    """Background task: keep the bot's presence set to 'online'.

    BUG FIX: the original looped with no delay, re-sending the presence
    update as fast as possible — a busy loop that hammers the Discord
    gateway and risks rate limiting.  Sleeping between updates preserves
    the intent while behaving like a proper background task.
    """
    while True:
        await client.change_presence(status=discord.Status.online)
        await asyncio.sleep(60)  # refresh once a minute instead of busy-looping
# Reusable "permission denied" embed, sent when a moderation action raises
# discord.Forbidden (see e.g. the `clear` command below).
Forbidden= discord.Embed(title="Permission Denied", description="1) Please check whether you have permission to perform this action or not. \n2) Please check whether my role has permission to perform this action in this channel or not. \n3) Please check my role position.", color=0x00ff00)
# Bot instance: responds to "%" or an @mention; pm_help delivers help via DM.
client = commands.Bot(description="The Laughing Clown BOT", command_prefix=commands.when_mentioned_or("%"), pm_help = True)
# Remove the library's built-in help so the custom `help` command below owns the name.
client.remove_command('help')
@client.event
async def on_ready():
    """Print a startup banner and launch the presence background task."""
    for banner_line in ('-----', '-----'):
        print(banner_line)
    print("Created by I'm Joker")
    client.loop.create_task(status_task())
@client.command(pass_context = True)
async def hlo(ctx):
    """Greet the invoking user with a mention."""
    greeting = f"Hello, How's your day today? {ctx.message.author.mention}"
    await ctx.send(greeting)
@client.command(pass_context = True)
async def quote(ctx):
    """Post a random Joker quote in a Heath Ledger tribute embed."""
    # Pool of quotes; one is chosen uniformly at random per invocation.
    choices = ["**Smile, because it confuses people. Smile, because it's easier than explaining what is killing you inside. - THE JOKER**", "**As you know, madness is like gravity...all it takes is a little push. - THE JOKER**", "**If you’re good at something, never do it for free. - THE JOKER**", "**Nobody panics when things go “according to plan”. Even if the plan is horrifying! - THE JOKER**", "**Introduce a little anarchy. Upset the established order, and everything becomes chaos. I'm an agent of chaos... - THE JOKER**", "**Do I really look like a guy with a plan? You know what I am? I'm a dog chasing cars. I wouldn't know what to do with one if I caught it! You know, I just... *do* things. - THE JOKER**", "**What doesn't kill you, simply makes you stranger! - THE JOKER**", "**Why so serious? >:) - THE JOKER**", "**They Laugh At me Because I'm Different. I laugh At Them Because The're all the same - THE JOKER**", "**Their morals, their code; it's a bad joke. Dropped at the first sign of trouble. They're only as good as the world allows them to be. You'll see- I'll show you. When the chips are down these, uh, civilized people? They'll eat each other. See I'm not a monster, I'm just ahead of the curve. - THE JOKER**", "**The only sensible way to live in this world is without rules. - THE JOKER**"]
    embed = discord.Embed(title = " ", description = "**RIP Heath Ledger.... You've gave us a memorable gift like JOKER... We can't forget you...**", color=0XFF69B4)
    embed.add_field(name="Here's a quote of JOKER for you....", value = random.choice(choices))
    embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/516953091656908810/531162741281521665/Heath_Ledger.png')
    embed.set_footer(text=f'Requested by {ctx.message.author}', icon_url=f'{ctx.message.author.avatar_url}')
    embed.timestamp = datetime.datetime.utcnow()
    await ctx.send(embed=embed)
@client.command(pass_context = True)
async def botinfo(ctx):
    """Post an embed describing the bot: creator, credits and live counts."""
    # NOTE(review): fetch_user is given string IDs here; digit strings appear
    # to work because they are formatted into the HTTP route, but the
    # documented parameter type is int — confirm before relying on it.
    User = await client.fetch_user('472128507150073871')
    User2 = await client.fetch_user('498378677512437762')
    User3 = await client.fetch_user('500219510079356928')
    User4 = await client.fetch_user("400255149014122496")
    embed=discord.Embed(title="Details of this BOT...", description="Here are the details of this BOT below", color=0XFF69B4)
    embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/516953091656908810/519072295080296469/Joker.png')
    embed.set_footer(text=f'Requested by {ctx.message.author.name}', icon_url=f'{ctx.message.author.avatar_url}')
    embed.timestamp = datetime.datetime.utcnow()
    embed.set_author(name=f"This is Official BOT of {ctx.guild.name} server")
    embed.add_field(name="__**Creator**__", value=User.mention, inline = True)
    embed.add_field(name="__**Special Thanks To**__", value=f"{User2.mention} \n {User3.mention} \n {User4.mention}")
    # Live stats: guild count, and distinct users across all guilds.
    embed.add_field(name="**Currently connected servers**", value=str(len(client.guilds)), inline = True)
    embed.add_field(name="**Currently connected users**", value=str(len(set(client.get_all_members()))), inline = True)
    embed.add_field(name="If you have any queries about this BOT, DM me...", value=User.mention)
    # Duplicate of the timestamp assignment above; harmless (same value re-set).
    embed.timestamp = datetime.datetime.utcnow()
    await ctx.send(embed=embed)
@client.command(pass_context = True)
async def userid(ctx):
    """Reply with the invoking user's snowflake ID."""
    await ctx.send(str(ctx.message.author.id))
@client.command(pass_context = True)
async def fams(ctx):
    """Post a random Dragon Ball Z GIF in an embed."""
    gifs = ['https://media.giphy.com/media/zDAqUralC0HU4/giphy.gif', 'https://media.giphy.com/media/pz1s2IpdQh86k/giphy.gif', 'https://media.giphy.com/media/1LnQIODGufGec/giphy.gif', 'https://media.giphy.com/media/yROJ5dn5IhR5u/giphy.gif', 'https://media.giphy.com/media/SjWEmbTtlOwcU/giphy.gif', 'https://media.giphy.com/media/396CPbx4g1o9W/giphy.gif', 'https://media.giphy.com/media/mXz3v0UdjrNTO/giphy.gif', 'https://media.giphy.com/media/XAr3mee7JuXYc/giphy.gif', 'https://media.giphy.com/media/12I9y6on09avza/giphy.gif', 'https://media.giphy.com/media/zBdfuQVMClAis/giphy.gif']
    embed = discord.Embed(title=f"Hello {ctx.message.author.name}, Here's your GIF....", description=" ", color=0XFF69B4)
    embed.set_image(url=random.choice(gifs))
    embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/516953091656908810/519072295080296469/Joker.png')
    embed.set_footer(text=f'Requested by {ctx.message.author}', icon_url=f'{ctx.message.author.avatar_url}')
    embed.timestamp = datetime.datetime.utcnow()
    await ctx.send(embed=embed)
@client.command(pass_context = True)
async def avatar(ctx, user: discord.Member=None):
    """Show the avatar of `user`, or of the invoker when no user is given.

    The original duplicated the entire embed-building sequence in two
    branches that differed only in whose avatar was shown; defaulting
    `user` to the message author collapses them into one path with
    identical output.
    """
    if user is None:
        user = ctx.message.author
    embed = discord.Embed(title=f'Avatar', description="Here's your avatar that you've requested...\n Don't misuse this cmd...", color=0XFF69B4)
    embed.add_field(name='User: {}'.format(user.name), value='Avatar:', inline=True)
    embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/516953091656908810/519072295080296469/Joker.png')
    embed.set_image(url = user.avatar_url)
    embed.set_footer(text=f"Requested by {ctx.message.author.name}", icon_url=f'{ctx.message.author.avatar_url}')
    embed.timestamp = datetime.datetime.utcnow()
    await ctx.send(embed=embed)
@client.command(pass_context = True)
@commands.has_permissions(administrator=True)
async def poll(ctx, question, *options:str):
    """Create a reaction poll (admin only).

    question: the poll title.
    options: 2-10 choices; a literal ('yes', 'no') pair gets thumb
    reactions, anything else numbered keycap emoji.
    """
    if len(options) <=1:
        await ctx.send('Joker needs more than one option to conduct poll!!')
        return
    if len(options) > 10:
        await ctx.send("Joker Can't accept more than 10 options to conduct poll!")
        return
    if len(options) == 2 and options[0] == 'yes' and options[1] == 'no':
        reactions = ['👍', '👎']
    else:
        reactions = ['1\u20e3', '2\u20e3', '3\u20e3', '4\u20e3', '5\u20e3', '6\u20e3', '7\u20e3', '8\u20e3', '9\u20e3', '\U0001f51f']
    # BUG FIX: the original did `description += '\n {} {}'.format(...)` on a
    # *list*, which extended it one character at a time and only produced the
    # right text by accident through ''.join.  Build real per-option lines.
    lines = ['\n {} {}'.format(reactions[i], option) for i, option in enumerate(options)]
    embed = discord.Embed(title=question, description=''.join(lines), color=0XFF69B4)
    react_message = await ctx.send(embed=embed)
    for reaction in reactions[:len(options)]:
        await react_message.add_reaction(reaction)
    # Edit the poll ID into the footer once the message (and its ID) exists.
    embed.set_footer(text='poll ID: {}'.format(react_message.id))
    await react_message.edit(embed=embed)
@client.command(pass_context = True)
async def marvel(ctx):
    """Post a random Marvel GIF in an embed."""
    gifs = ['https://media.giphy.com/media/F9hQLAVhWnL56/giphy.gif', 'https://media.giphy.com/media/l4FGrYKtP0pBGpBAY/giphy.gif', 'https://media.giphy.com/media/JzujPK0id34qI/giphy.gif', 'https://media.giphy.com/media/M9TuBZs3LIQz6/giphy.gif', 'https://media.giphy.com/media/3GnKKEw2v7bXi/giphy.gif', 'https://media.giphy.com/media/GR1WWKadM9m0g/giphy.gif', 'https://media.giphy.com/media/iBpq5SbrYiSTTSHO7z/giphy.gif', 'https://media.giphy.com/media/dJirXKRo0j1l0j9V9Q/giphy.gif', 'https://media.giphy.com/media/ZvkFmclQO1ImmRNm0K/giphy.gif', 'https://media.giphy.com/media/82Mksc7tnX3qp4FVNN/giphy.gif', 'https://media.giphy.com/media/mTQhl6cWXDJBu/giphy.gif']
    embed = discord.Embed(title=f"Hello {ctx.message.author.name}... Here's your GIF...", description="This BOT is made by I'm Joker", color=0XFF69B4)
    embed.set_image(url=random.choice(gifs))
    embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/516953091656908810/519072295080296469/Joker.png')
    embed.set_footer(text=f'Requested by {ctx.message.author.name} ', icon_url=f'{ctx.message.author.avatar_url}')
    embed.timestamp = datetime.datetime.utcnow()
    await ctx.send(embed=embed)
@client.command(pass_context = True)
async def dc(ctx):
    """Post a random DC GIF in an embed."""
    gifs = ['https://media.giphy.com/media/uDPSXySAEDv56/giphy.gif', 'https://media.giphy.com/media/26vIg1DlkNdJr65q0/giphy.gif', 'https://media.giphy.com/media/jcIRoyJKQG3za/giphy.gif', 'https://media.giphy.com/media/26xBLVi4RuhYmV6zm/giphy.gif', 'https://media.giphy.com/media/xUOwGfcrlRjKjs2sSI/giphy.gif', 'https://media.giphy.com/media/l41Yq5KYEmbxFaeVq/giphy.gif', 'https://media.giphy.com/media/3o7abJW5ZuiByDelji/giphy.gif', 'https://media.giphy.com/media/xU67CtAMi8f5K/giphy.gif', 'https://media.giphy.com/media/VXQuKHDhTIBWM/giphy.gif']
    embed = discord.Embed(title="Hello kryptonian... Here's your GIF...", color=0XFF69B4)
    embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/516953091656908810/519072295080296469/Joker.png')
    embed.set_image(url=random.choice(gifs))
    embed.set_footer(text=f'Requested by {ctx.message.author.name}', icon_url=f'{ctx.message.author.avatar_url}')
    embed.timestamp = datetime.datetime.utcnow()
    await ctx.send(embed=embed)
@client.command(pass_context = True)
async def joker(ctx):
    """Post a random Joker GIF (Heath Ledger tribute) in an embed."""
    gifs = ['https://media.giphy.com/media/KZd26L2o8QXtK/giphy.gif', 'https://media.giphy.com/media/aazZrFTMrDKLK/giphy.gif', 'https://media.giphy.com/media/F0A48Q2wFjE7S/giphy.gif', 'https://media.giphy.com/media/7waKDy5RbDYVG/giphy.gif', 'https://media.giphy.com/media/13m24iFmhomZi0/giphy.gif', 'https://media.giphy.com/media/zCP1GdPjxtCTe/giphy.gif', 'https://media.giphy.com/media/tN2OR1R1BLKV2/giphy.gif', 'https://media.giphy.com/media/X9Z0O2bpi8GMU/giphy.gif', 'https://media.giphy.com/media/YPIrsRqqO7oB2/giphy.gif', 'https://media.giphy.com/media/FSp1Wqx2TPYSA/giphy.gif', 'https://media.giphy.com/media/8UwEdwAF5XWQE/giphy.gif']
    embed = discord.Embed(title="Hello Joker fan... Here's a GIF for you...", description="Tribute to the legendary **Heath Ledger**", color=0XFF69B4)
    embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/516953091656908810/531162741281521665/Heath_Ledger.png')
    embed.set_image(url=random.choice(gifs))
    embed.set_footer(text=f'Requested by {ctx.message.author.name}', icon_url=f'{ctx.message.author.avatar_url}')
    embed.timestamp = datetime.datetime.utcnow()
    await ctx.send(embed=embed)
@client.command(pass_context = True)
async def meme(ctx):
    """Fetch a random post from r/me_irl and post its image."""
    async with aiohttp.ClientSession() as session:
        async with session.get("https://api.reddit.com/r/me_irl/random") as resp:
            payload = await resp.json()
    # payload[0] is the listing; its first child is the random post.
    image_url = payload[0]["data"]["children"][0]["data"]["url"]
    embed = discord.Embed(title='Meme',color=0XFF69B4)
    embed.set_image(url=image_url)
    embed.set_footer(text=f'Requested by: {ctx.message.author.name}', icon_url=f'{ctx.message.author.avatar_url}')
    embed.timestamp = datetime.datetime.utcnow()
    await ctx.send(embed=embed)
@client.command(pass_context = True)
async def serverinvite(ctx):
    """Post the server's invite link with a short blurb."""
    await ctx.send(
        "**Thanks for joining in our server.... Invite your friends and tell them join the party too** \n https://discord.gg/hhmfxW3"
    )
@client.command(pass_context = True)
@commands.has_permissions(manage_messages=True)
async def clear(ctx, number):
    """Bulk-delete the last <number> messages in this channel.

    number: how many messages to remove (the invoking %clear message is
    deleted too, hence the +1 on the history limit).
    BUG FIX: the original ended with `await ctx.delete_messages(mgs)` —
    Context has no delete_messages attribute, and the messages had already
    been deleted above, so every successful run crashed with
    AttributeError.  The stray call is removed.
    """
    if ctx.message.author.guild_permissions.manage_messages:
        mgs = []  # messages gathered from the channel history
        number = int(number)  # command arguments arrive as strings
        # +1 so the %clear invocation itself is swept up as well.
        async for x in ctx.history(limit = number+1):
            mgs.append(x)
        try:
            await ctx.message.channel.delete_messages(mgs)
            # Confirm, then remove the confirmation after 5 seconds.
            x = await ctx.send('`Joker has deleted '+str(number)+' messages for you...`')
            await asyncio.sleep(5)
            await x.delete()
        except discord.Forbidden:
            await ctx.send(embed=Forbidden)
            return
        except discord.HTTPException:
            # e.g. messages older than 14 days or a batch larger than 100.
            await ctx.send('clear failed.')
            return
@client.command(pass_context=True)
async def movie(ctx, *, name:str=None):
    """Look up a movie on the OMDB API and post its details as an embed.

    name: movie title to search; when omitted a usage hint is shown.
    BUG FIXES: the original fell through after the usage hint and queried
    OMDB with name=None, and it indexed fields like x["Poster"] without
    checking OMDB's {"Response": "False"} not-found answer (KeyError).
    """
    await ctx.trigger_typing()
    if name is None:
        embed=discord.Embed(description = "Please specify a movie, *eg. %movie Bohemian Rhapsody*", color = 0XFF69B4)
        await ctx.send(embed=embed)
        return
    key = "4210fd67"
    url = "http://www.omdbapi.com/?t={}&apikey={}".format(name, key)
    response = requests.get(url)
    x = json.loads(response.text)
    # OMDB signals "title not found" with Response == "False" and none of
    # the data fields read below.
    if x.get("Response") == "False":
        await ctx.send("Sorry, I couldn't find that movie.")
        return
    embed=discord.Embed(title = "**{}**".format(name).upper(), description = "Here is your movie {}".format(ctx.message.author.name), color = 0XFF69B4)
    if x["Poster"] != "N/A":
        embed.set_thumbnail(url = x["Poster"])
    embed.add_field(name = "__Title__", value = x["Title"])
    embed.add_field(name = "__Released__", value = x["Released"])
    embed.add_field(name = "__Runtime__", value = x["Runtime"])
    embed.add_field(name = "__Genre__", value = x["Genre"])
    embed.add_field(name = "__Director__", value = x["Director"])
    embed.add_field(name = "__Writer__", value = x["Writer"])
    embed.add_field(name = "__Actors__", value = x["Actors"])
    embed.add_field(name = "__Plot__", value = x["Plot"])
    embed.add_field(name = "__Language__", value = x["Language"])
    embed.add_field(name = "__Imdb Rating__", value = x["imdbRating"]+"/10")
    embed.add_field(name = "__Type__", value = x["Type"])
    embed.set_footer(text = "Information from the OMDB API")
    await ctx.send(embed=embed)
@client.command(pass_context = True)
async def help(ctx):
    """Custom help command (the built-in one was removed at startup)."""
    embed=discord.Embed(title="__Command Prefix__: %", description='', color=0XFF69B4)
    embed.add_field(name="__**Summary**__", value="**This is the official BOT of REFORMED server. You can't find this BOT anywhere than here. This BOT is made in memory of JOKER \n And this BOT can't be distributed to anyone \n \n \n**", inline=True)
    # Single hand-maintained field listing every command; update this string
    # whenever a command is added or removed.
    embed.add_field(name="__**Commands**__", value="__**Fun Commands**__ \n `quote` - Quote of Joker \n `fams` - Random DragonBall Z GIF \n `marvel` - Random Marvel GIF \n `dc` - Random DC GIF \n `joker` - Random Joker GIF (Tribute to Heath Ledger) \n`meme` - Random funny meme \n `movie <movie name>` - Gives info of the particular movie you have searched \n \n __**Bot and server releated commands**__ \n `botinfo` - Information about this BOT \n `serverinvite` - Server invitation link \n \n __**Misc Commands**__ \n `avatar` - Avatar of the user \n `avatar <user>` - Avatar of mentioned user \n \n __**Admin Commands**__ \n `poll` - Polling (Administrator) \n `askquestion` - Asking of funny question (Administrator) \n `announce <channel> <matter>` - To announce the entered matter (Administrator) \n \n **More Feautures coming soon...** \n \n __**BOT will be offline someties... That means we are updating BOT**__ \n **Thank you for using this BOT**")
    embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/516953091656908810/519072295080296469/Joker.png')
    embed.set_footer(text=f'Requested by {ctx.message.author.name}', icon_url=f'{ctx.message.author.avatar_url}')
    embed.timestamp = datetime.datetime.utcnow()
    await ctx.send(embed=embed)
@client.command(pass_context = True)
@commands.has_permissions(administrator=True)
async def announce(ctx, channel: discord.TextChannel=None, *, msg: str):
    """Relay `msg` to `channel` (admin only); show usage when no channel is given."""
    if channel is not None:
        await channel.send(msg)
    else:
        await ctx.send(" ```Proper usage is\n\nannounce<channel><matter>```")
@client.command(pass_context=True)
async def rule1(ctx):
    """Post server rule 1 (Discord ToS)."""
    text = "**Server rule 1: Follow the discord ToS. Any violations of the terms of service will result in an immediate ban! (Ban) The ToS can be found here:** https://discordapp.com/terms"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule2(ctx):
    """Post server rule 2 (no cross-server drama)."""
    text = "** Server rule 2: What happens in Reformed stays in Reformed. Don't talk about how this server is better and vice versa, don't talk about the mods and how they are bad, dont ask me for unbans, dont talk shit about the members there, etc etc. (Warn/mute, ban)**"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule3(ctx):
    """Post server rule 3 (swearing limits / banned words)."""
    text = "**Server rule 3: Swearing is allowed, but please keep it to a limit! Usage of banned words is not allowed! List is found here: https://cdn.discordapp.com/attachments/414216301771358208/454122060751437826/Screen_Shot_2018-05-23_at_6.03.31_PM-1.png (Warn, mute, ban)**"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule4(ctx):
    """Post server rule 4 (image posting), naming the allowed image channel."""
    image_channel = client.get_channel(565770888449097748)
    text = "**Server rule 4: Image posting is not allowed anywhere except in {}. Posting the same message/emote as other users repeatedly is not allowed. (Warn, mute, kick/ban) Excessive spamming of random characters/images is categorized as a raid and will lead to a (Ban)**".format(image_channel.mention)
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule5(ctx):
    """Post server rule 5 (no rudeness/trolling)."""
    text = "**Server rule 5: Rudeness towards other members or trolling is not allowed! (Warn, mute, kick/ban)**"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule6(ctx):
    """Post server rule 6 (no harassment)."""
    text = "**Server rule 6: Harrassment is not allowed here! (Mute, kick/ban)**"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule7(ctx):
    """Post server rule 7 (no disrespect)."""
    text = "**Server rule 7: Disrespect towards members is not allowed! (Warn/mute, kick/ban)**"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule8(ctx):
    """Post server rule 8 (no impersonation)."""
    text = "**Server rule 8: Impersonation of other members is not allowed! (Warn, kick/ban)**"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule9(ctx):
    """Post server rule 9 (no discrimination)."""
    text = "**Server rule 9: Discriminatory behavior like racism and sexism is not allowed here. (Warn/mute, ban)**"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule10(ctx):
    """Post server rule 10 (no NSFW content)."""
    text = "**Server rule 10: NSFW (even if the image is cropped or blurred) or inappropriate images in this server are not allowed anywhere. (Warn, kick/ban: Ban for illegal content)**"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule11(ctx):
    """Post server rule 11 (no doxxing/DDoS)."""
    text = "**Server rule 11: DDoSing or revealing personal info about a member without their consent is not allowed. (Kick/ban)**"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule12(ctx):
    """Post server rule 12 (no vulgar names/nicknames).

    BUG FIX: the original string ended "(warn/kick/ban**" — the closing
    parenthesis was missing and the markdown bold marker never matched,
    so the rule rendered broken in Discord.  Punctuation now matches the
    style of the other rule messages.
    """
    rule12 = "**Server rule 12: Vulgar or inappropriate names/nicknames are not allowed (Warn/kick/ban)**"
    await ctx.trigger_typing()
    await ctx.send(rule12)
@client.command(pass_context=True)
async def rule13(ctx):
    """Post server rule 13 (no advertising)."""
    text = "**Server rule 13: Advertising in this server without staff permission is not allowed! (Warn, kick/ban)**"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule14(ctx):
    """Post server rule 14 (no jokes about sensitive subjects)."""
    text = "**Server rule 14: Joking about sensitive subjects such as rape, suicide/self-harm, death, serious illnesses, etc is not allowed! (Warn, mute, ban)**"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule15(ctx):
    """Post server rule 15 (no DM advertising)."""
    text = "**Server rule 15: DM Advertising is not allowed at all! (Warn, ban)**"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule16(ctx):
    """Post server rule 16 (no bullying)."""
    text = "**Server rule 16: Bullying members in any way or form is not allowed (Warn, ban)**"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule17(ctx):
    """Post server rule 17 (punishment evasion)."""
    text = "**Server rule 17: Leaving the server to evade mutes, warns, etc will result in double the punishment! (Warn/Mute x2, Ban)**"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context=True)
async def rule18(ctx):
    """Post server rule 18 (no alt accounts)."""
    text = "**Server rule 18: Alternative Accounts are not allowed! Only Owners and Admins are allowed to have alts for testing purposes mainly. Those caught with an Alternative Account may result in both accounts being Banned.**"
    await ctx.trigger_typing()
    await ctx.send(text)
@client.command(pass_context = True)
@commands.has_permissions(administrator=True)
async def lock(ctx, Role:discord.Role= None, channel:discord.TextChannel=None):
    """Hide and mute `channel` for `Role` (admin only).

    ROBUSTNESS FIX: both parameters default to None but the original
    dereferenced them unconditionally, crashing with AttributeError when
    either was omitted; a usage message is reported instead.
    """
    if Role is None or channel is None:
        await ctx.send("```Proper usage is\n\nlock <role> <channel>```")
        return
    overwrite = discord.PermissionOverwrite()
    overwrite.send_messages = False
    overwrite.read_messages = False
    overwrite.read_message_history = False
    await channel.set_permissions(Role, overwrite = overwrite)
    await ctx.send(f"**{channel.mention} has been locked for** `{Role.name}`")
@client.command(pass_context = True)
@commands.has_permissions(administrator=True)
async def unlock(ctx, Role:discord.Role=None, Channel:discord.TextChannel=None):
    """Restore read/send permissions on `Channel` for `Role` (admin only).

    ROBUSTNESS FIX: both parameters default to None but the original
    dereferenced them unconditionally, crashing with AttributeError when
    either was omitted; a usage message is reported instead (matching lock).
    """
    if Role is None or Channel is None:
        await ctx.send("```Proper usage is\n\nunlock <role> <channel>```")
        return
    overwrite = discord.PermissionOverwrite()
    overwrite.send_messages = True
    overwrite.read_messages = True
    overwrite.read_message_history = True
    await Channel.set_permissions(Role, overwrite = overwrite)
    await ctx.send(f"**{Channel.mention} has been unlocked for** `{Role.name}`")
@client.command(pass_context = True)
@commands.has_permissions(ban_members = True)
async def tempmute(ctx, user: discord.Member, num: int, time: str, reason:str):
    """Temporarily mute `user` for `num` units of `time` ('m', 'hr' or 'd').

    Rewritten from three copy-pasted branches into one table-driven path.
    BUG FIXES: the hour and day branches announced the mute as
    "{num} minutes"; each unit now reports itself correctly.  The
    "Congractulations" typo in the unmute message is fixed, and an unknown
    time unit is reported instead of silently doing nothing.
    """
    units = {'m': (60, 'minute(s)'), 'hr': (3600, 'hour(s)'), 'd': (86400, 'day(s)')}
    if time not in units:
        await ctx.send("Time unit must be one of: m, hr, d")
        return
    seconds_per_unit, unit_name = units[time]
    # Hard-coded Muted role for this guild.
    role = discord.utils.get(ctx.guild.roles, id=520653530529398784)
    await user.add_roles(role)
    await ctx.send(f"{user.mention} is muted for {num} {unit_name} for {reason}")
    await asyncio.sleep(num * seconds_per_unit)
    await user.remove_roles(role)
    await ctx.send(f"Congratulations {user.mention}, you are unmuted after {num} {unit_name}. Don't try to get mute again..")
@client.command(pass_context = True)
async def spotify(ctx, user: discord.Member=None):
    """Show what `user` (default: the invoker) is listening to on Spotify.

    The original's if/else branches were byte-for-byte identical apart
    from the `user = ctx.author` default; resolving the default first
    removes the duplicated loop without changing behaviour.
    """
    if user is None:
        user = ctx.author
    for activity in user.activities:
        if isinstance(activity, Spotify):
            embed = discord.Embed(title=f" ", description=f"{user.mention} is listening to...", color=activity.color)
            embed.add_field(name="**Title**", value=activity.title, inline=False)
            embed.add_field(name="**Artist**", value=activity.artist, inline=False)
            embed.add_field(name="**Album**", value=activity.album, inline=False)
            embed.add_field(name="**Duration**", value=activity.duration, inline=False)
            embed.add_field(name="**Track ID**", value=activity.track_id, inline=False)
            embed.set_thumbnail(url=activity.album_cover_url)
            embed.set_author(name=user.name, icon_url=user.avatar_url)
            embed.timestamp = datetime.datetime.utcnow()
            await ctx.send(embed=embed)
@client.command(pass_context=True)
@commands.has_permissions(manage_roles = True)
async def roleinfo(ctx, role: discord.Role=None):
    """Post an embed with details about `role` (requires Manage Roles).

    ROBUSTNESS FIX: `role` defaults to None but the original dereferenced
    it immediately (role.color), crashing with AttributeError when the
    argument was omitted; a usage message is reported instead.
    """
    if role is None:
        await ctx.send("```Proper usage is\n\nroleinfo <role>```")
        return
    embed = discord.Embed(title=f"Here's the info of {role} role...", description=" ", color=role.color)
    embed.set_author(name=f"{ctx.guild.name}", icon_url=ctx.guild.icon_url)
    embed.add_field(name="ID", value=role.id, inline=False)
    embed.add_field(name="Name", value=role.name, inline=False)
    embed.add_field(name="Permissions", value=role.permissions, inline=False)
    embed.add_field(name="Guild/Server", value=ctx.guild.name, inline=False)
    embed.add_field(name="The role is shown seperately from others", value=role.hoist, inline=False)
    embed.add_field(name="Position of the role", value=role.position, inline=False)
    embed.add_field(name="Time of creation", value=role.created_at.strftime("%d-%m-%Y %H:%M:%S"), inline=False)
    embed.timestamp = datetime.datetime.utcnow()
    embed.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar_url)
    await ctx.send(embed=embed)
@client.event
async def on_message_delete(message):
    """Log deleted non-bot messages to the audit-log channel."""
    if message.author.bot:
        return
    log_channel = client.get_channel(557273459244269582)
    details = f"Message sent by: {message.author.mention} deleted in {message.channel.mention} \n \n {message.content}"
    embed = discord.Embed(title=f"{message.author.name}", description=details, color=0XFF69BF)
    embed.set_footer(text=f"Author {message.author.id} | Message ID: {message.id}")
    embed.timestamp = datetime.datetime.utcnow()
    await log_channel.send(embed=embed)
@client.event
async def on_member_remove(member):
    """Announce a departure, log it, and refresh the member-count channel name."""
    farewell_channel = client.get_channel(565768324252958720)
    log_channel = client.get_channel(557273459244269582)
    counter_channel = client.get_channel(571302888110817281)
    human_count = sum(1 for m in member.guild.members if not m.bot)
    # Public goodbye embed.
    embed = discord.Embed(title=f"Good bye {member.name}... Hope you'll come back again to {member.guild.name}", description="Thank you for being with us all these times...", color=0XFF69B4)
    embed.set_thumbnail(url='https://media.giphy.com/media/LTFbyWuELIlqlXGLeZ/giphy.gif')
    embed.add_field(name="__**Members Remaining**__", value=str(member.guild.member_count), inline=True)
    embed.timestamp = datetime.datetime.utcnow()
    # Compact audit-log embed.
    log_embed = discord.Embed(title="Member Left", description=member.mention, color=0XFF69B4)
    log_embed.set_thumbnail(url=member.avatar_url)
    log_embed.add_field(name="**Members Remaining**", value=str(member.guild.member_count), inline=True)
    log_embed.set_footer(text=f"ID: {member.id}", icon_url=member.avatar_url)
    log_embed.timestamp = datetime.datetime.utcnow()
    await farewell_channel.send(embed=embed)
    await log_channel.send(embed=log_embed)
    await counter_channel.edit(name= f"Weebs: {human_count}")
@client.event
async def on_member_join(member):
    """DM-based captcha verification for new members, then a public welcome.

    Flow: DM three intro embeds, then give the member up to three attempts
    to type back a randomly chosen nonsense word.  Success grants the
    verified role and falls through to the welcome/logging block at the
    bottom; three failures kick the member and return early.
    NOTE(review): wait_for has no timeout, so a silent member blocks this
    handler forever — confirm whether that is acceptable.
    """
    # Nonsense captcha words; a fresh one is drawn for every attempt.
    choices = ["DcssawdeS", "Sasdawdd", "AWSdasdwaA", "AdwASwAas", "AsdWDAasas", "ASDwdAsad", "MKiojmkomM"]
    choices2 = random.choice(choices)
    # Hard-coded verified-member role for this guild.
    role = discord.utils.get(member.guild.roles, id=516303012671520769)
    embed = discord.Embed(title=" ", description=f"Welcome to {member.guild.name}, In order to send any message in the server, You must verify as per the server's policy. Sorry for bothering you but it's my duty though.... And please follow the instructions below. I'm sure that the instructions will be easy for you... Wait 15 seconds for next message.", color=0XFF69BF)
    embed.set_author(name=f"Verification for {member.guild.name}", icon_url=member.guild.icon_url)
    embed.set_footer(text=f"After this process you'll get {role.name} so that we sure you're verified", icon_url=member.avatar_url)
    embed2 = discord.Embed(title=" ", description="You've to type the word shown in the next message correctly. And you'vve got only three chances. If you failed to enter correct word, Then you'll get kicked from server and you've to join again... Wait 15 seconds for next message", color=0XFF69BF)
    embed2.set_author(name=f"Verification for {member.guild.name}", icon_url=member.guild.icon_url)
    embed2.set_footer(text=f"After this process you'll get {role.name} so that we sure you're verified", icon_url=member.avatar_url)
    embed3 = discord.Embed(title=f"This is your first attempt (Two remaining)... Type the word shown below correctly... **\n \n {choices2} \n \n**", description=" ", color=0XFF69BF)
    embed3.set_author(name=f"Verification for {member.guild.name}", icon_url=member.guild.icon_url)
    embed3.set_footer(text=f"After this process you'll get {role.name} so that we sure you're verified", icon_url=member.avatar_url)
    await member.send(embed=embed)
    await asyncio.sleep(15)
    await member.send(embed=embed2)
    await asyncio.sleep(15)
    await member.send(embed=embed3)
    # Attempt 1: accept any message from this member (in any channel/DM).
    msg2 = await client.wait_for('message', check=lambda message: message.author == member)
    if msg2.content == choices2:
        embed4 = discord.Embed(title=f"Yayy!!! You've made it you've got {role.name} role enjoy your stay in {member.guild.name} server... Thanks for supporting us", description=" ", color=0XFF69BF)
        embed4.set_author(name=f"Verification for {member.guild.name}", icon_url=member.guild.icon_url)
        embed4.set_footer(text=f"After this process you'll get {role.name} so that we sure you're verified", icon_url=member.avatar_url)
        await member.send(embed=embed4)
        await member.add_roles(role)
    else:
        # Attempt 2: a *new* random word is drawn for each retry.
        choices3 = random.choice(choices)
        embed5 = discord.Embed(title=f"You've typed the wrong word... This is your second attempt (One remaining)... Type the word shown below correctly...** \n \n {choices3} \n \n**", description=" ", color=0XFF69BF)
        embed5.set_author(name=f"Verification for {member.guild.name}", icon_url=member.guild.icon_url)
        embed5.set_footer(text=f"After this process you'll get {role.name} so that we sure you're verified", icon_url=member.avatar_url)
        await member.send(embed=embed5)
        msg3 = await client.wait_for('message', check=lambda message: message.author == member)
        if msg3.content == choices3:
            embed4 = discord.Embed(title=f"Yayy!!! You've made it you've got {role.name} role enjoy your stay in {member.guild.name} server... Thanks for supporting us", description=" ", color=0XFF69BF)
            embed4.set_author(name=f"Verification for {member.guild.name}", icon_url=member.guild.icon_url)
            embed4.set_footer(text=f"After this process you'll get {role.name} so that we sure you're verified", icon_url=member.avatar_url)
            await member.send(embed=embed4)
            await member.add_roles(role)
        else:
            # Attempt 3 (last chance before kick).
            choices4 = random.choice(choices)
            embed6 = discord.Embed(title=f"You've typed the wrong word... This is your last attempt... Type the word shown below correctly.. If yout typed wrong you'll get kicked from server...** \n \n {choices4} \n \n**", description=" ", color=0XFF69BF)
            embed6.set_author(name=f"Verification for {member.guild.name}", icon_url=member.guild.icon_url)
            embed6.set_footer(text=f"After this process you'll get {role.name} so that we sure you're verified", icon_url=member.avatar_url)
            await member.send(embed=embed6)
            msg4 = await client.wait_for('message', check=lambda message: message.author == member)
            if msg4.content == choices4:
                embed4 = discord.Embed(title=f"Yayy!!! You've made it you've got {role.name} role enjoy your stay in {member.guild.name} server... Thanks for supporting us", description=" ", color=0XFF69BF)
                embed4.set_author(name=f"Verification for {member.guild.name}", icon_url=member.guild.icon_url)
                embed4.set_footer(text=f"After this process you'll get {role.name} so that we sure you're verified", icon_url=member.avatar_url)
                await member.send(embed=embed4)
                await member.add_roles(role)
            else:
                # All three attempts failed: kick and skip the welcome below.
                await member.send("**You've entered a wrong word agian.... Your attempts are over... You've been kicked out of this server**")
                await member.guild.kick(member, reason="**Unsuccessful Verification**")
                return
    # Verification succeeded — welcome the member publicly and log the join.
    # Account-creation time decoded from the snowflake ID.
    gettime = discord.utils.snowflake_time(member.id)
    channel = client.get_channel(565766644140474368)
    channel2 = client.get_channel(557273459244269582)
    text_channel = client.get_channel(565767003533737985)
    userchannel = client.get_channel(571302888110817281)
    # Count of non-bot members, used for the "Weebs: N" counter channel.
    person_count = len([member for member in member.guild.members if not member.bot])
    embed=discord.Embed(title=f"Welcome {member.name} to {member.guild.name}", description=f"**Hope you'll be active here... Read rules at {text_channel.mention} channel and don't break any of them...**", color=0XFF69B4)
    embed.set_thumbnail(url='https://media.giphy.com/media/OF0yOAufcWLfi/giphy.gif')
    embed.add_field(name="__**Thanks for joining our server**__", value="We hope you a good stay here....")
    embed.add_field(name="__**Time of joining**__", value=member.joined_at.date(), inline=True)
    embed.add_field(name="__**Joining position**__", value='{}'.format(str(member.guild.member_count)), inline=True)
    embed.add_field(name="__**User account created at**__", value=gettime.date(), inline=True)
    embed.set_footer(text=member.name, icon_url=member.avatar_url)
    embed.timestamp = datetime.datetime.utcnow()
    embed2=discord.Embed(title="Member Joined", description=member.mention, color=0XFF69B4)
    embed2.add_field(name="**Members Remaining**", value=str(member.guild.member_count), inline=True)
    embed2.set_footer(text=f"ID: {member.id}", icon_url=member.avatar_url)
    embed2.timestamp = datetime.datetime.utcnow()
    await channel.send(embed=embed)
    await channel2.send(embed=embed2)
    await userchannel.edit(name= f"Weebs: {person_count}")
@client.event
async def on_message_edit(before, after):
    """Post an audit-log embed whenever a message's text content is edited."""
    if before.content == after.content:
        return  # embed/pin updates also fire this event; only log text edits
    log_channel = client.get_channel(557273459244269582)
    jump_text = (
        f"**Message edited in {before.channel.mention} **"
        f"[Jump to message](https://discordapp.com/channels/{before.guild.id}/{after.channel.id}/{after.id})"
    )
    log_embed = discord.Embed(title=f"{before.author.name}", description=jump_text, color=0XFF69B4)
    log_embed.add_field(name="Before", value=before.content, inline=False)
    log_embed.add_field(name="After", value=after.content, inline=False)
    log_embed.timestamp = datetime.datetime.utcnow()
    log_embed.set_footer(text=f"ID: {before.id}")
    await log_channel.send(embed=log_embed)
@client.event
async def on_guild_channel_create(channel):
    """Announce newly created channels in the audit-log channel."""
    log_channel = client.get_channel(557273459244269582)
    notice = discord.Embed(
        title="New Channel Created",
        description=f"**Channel Created: {channel.mention}**",
        color=0XFF69BF,
    )
    notice.set_author(name=channel.guild.name, icon_url=channel.guild.icon_url)
    notice.timestamp = datetime.datetime.utcnow()
    notice.set_footer(text=f"ID: {channel.id}")
    await log_channel.send(embed=notice)
@client.event
async def on_guild_channel_delete(channel):
    """Announce channel deletions in the audit-log channel."""
    log_channel = client.get_channel(557273459244269582)
    notice = discord.Embed(
        title="Channel Deleted",
        description=f"**Channel Deleted: {channel.name}**",
        color=0XFF69BF,
    )
    notice.set_author(name=channel.guild.name, icon_url=channel.guild.icon_url)
    notice.timestamp = datetime.datetime.utcnow()
    notice.set_footer(text=f"ID: {channel.id}")
    await log_channel.send(embed=notice)
@client.event
async def on_guild_channel_update(before, after):
    """Log channel renames and topic changes to the audit-log channel.

    Fix: the rename branch previously passed ``after.guild.name`` (a string)
    as ``icon_url``; it now uses ``after.guild.icon_url``, matching the
    topic-change branch below.
    """
    channel2 = client.get_channel(557273459244269582)
    if before.name != after.name:
        embed = discord.Embed(title="Channel Name Edited", description=" ", color=0XFF69BF)
        # was: icon_url=after.guild.name (bug — guild name is not a URL)
        embed.set_author(name=after.guild.name, icon_url=after.guild.icon_url)
        embed.add_field(name="Before", value=before.name, inline=False)
        embed.add_field(name="After", value=after.name, inline=False)
        embed.timestamp = datetime.datetime.utcnow()
        embed.set_footer(text=f"ID: {after.id}")
        await channel2.send(embed=embed)
    elif before.topic != after.topic:
        embed2 = discord.Embed(title="Channel Topic Edited", description=f"Channel edited: {after.mention} ", color=0XFF69BF)
        embed2.set_author(name=after.guild.name, icon_url=after.guild.icon_url)
        embed2.add_field(name="Before", value=before.topic, inline=False)
        embed2.add_field(name="After", value=after.topic, inline=False)
        embed2.timestamp = datetime.datetime.utcnow()
        embed2.set_footer(text=f"ID: {after.id}")
        await channel2.send(embed=embed2)
@client.command(pass_context = True)
@commands.has_permissions(administrator=True)
async def rules(ctx):
    """Admin-only command: post the full server rules as one embed.

    Each ``rule_N`` string is the body of one embed field; the field names
    carry the rule's informal title. The image channel referenced in rule 4
    is resolved from a hard-coded channel ID.
    """
    # Channel where image/link posting is allowed (mentioned inside rule 4).
    channel2 = client.get_channel(565770888449097748)
    rule_1 = f"Follow the discord ToS. Any violations of the terms of service will result in an immediate ban! (Ban) The ToS can be found here: [Discord ToS](https://discordapp.com/terms)"
    rule_2= "What happens in Reformed stays in Reformed. Don't talk about how this server is better and vice versa, don't talk about the mods and how they are bad, dont ask me for unbans, dont talk shit about the members there, etc etc. (Warn/mute, ban)"
    rule_3 = "Swearing is allowed, but please keep it to a limit! Usage of banned words is not allowed! List is found here: [Click here](https://cdn.discordapp.com/attachments/414216301771358208/454122060751437826/Screen_Shot_2018-05-23_at_6.03.31_PM-1.png) (Warn, mute, ban)"
    rule_4 = "Image posting is not allowed anywhere except in {}. Posting the same message/emote as other users repeatedly is not allowed. (Warn, mute, kick/ban) Excessive spamming of random characters/images is categorized as a raid and will lead to a (Ban)".format(channel2.mention)
    rule_5 = "Rudeness towards other members or trolling is not allowed! (Warn, mute, kick/ban)"
    rule_6 = "Harrassment is not allowed here! (Mute, kick/ban)"
    rule_7 = "Disrespect towards members is not allowed! (Warn/mute, kick/ban)"
    rule_8 = "Impersonation of other members is not allowed! (Warn, kick/ban)"
    rule_9 = "Discriminatory behavior like racism and sexism is not allowed here. (Warn/mute, ban)"
    rule_10 = "NSFW (even if the image is cropped or blurred) or inappropriate images in this server are not allowed anywhere. (Warn, kick/ban: Ban for illegal content)"
    rule_11 = "DDoSing or revealing personal info about a member without their consent is not allowed. (Kick/ban)"
    rule_12 = "Vulgar or inappropriate names/nicknames are not allowed (warn/kick/ban)"
    rule_13 = "Advertising in this server without staff permission is not allowed! (Warn, kick/ban)"
    rule_14 = "Joking about sensitive subjects such as rape, suicide/self-harm, death, serious illnesses, etc is not allowed! (Warn, mute, ban)"
    rule_15 = "DM Advertising is not allowed at all! (Warn, ban)"
    rule_16 = "Bullying members in any way or form is not allowed (Warn, ban)"
    rule_17 = "Leaving the server to evade mutes, warns, etc will result in double the punishment! (Warn/Mute x2, Ban)"
    rule_18 = "Alternative Accounts are not allowed! Only Owners and Admins are allowed to have alts for testing purposes mainly. Those caught with an Alternative Account may result in both accounts being Banned"
    punishments = "**:beginner: 1 WARNING= NOTHING \n :beginner: 2 WARNINGS = MUTE FOR 12HR \n :beginner: 4 WARNINGS = MUTE FOR 1 WEEK \n :beginner: 10 WARNINGS = INSTANT KICK \n NOTE: SOFT SPAMS..U MAY GET MUTE BUT FOR HARMFUL SPAMS WILL GET U INSTANT BAN**"
    embed = discord.Embed(title="**CONSTITUTION OF THIS SERVER... JK RULES OF THIS SERVER...**", description=None, color=0XFF69B4)
    embed.add_field(name="**:beginner: Discord ToS apply... Coz he's the only boss here**", value=rule_1, inline=False)
    embed.add_field(name="**:beginner: Keep it down**", value=rule_2, inline=False)
    embed.add_field(name="**:beginner: No bad words please...**", value=rule_3, inline=False)
    embed.add_field(name="**:beginner: We have a seperate channel for posting images and links...**", value=rule_4, inline=False)
    embed.add_field(name="**:beginner: We don't support any type of trolling**", value=rule_5, inline=False)
    embed.add_field(name="**:beginner: Don't harrass any wumpus around this server...**", value=rule_6, inline=False)
    embed.add_field(name="**:beginner: Always give respect and take respect..**", value=rule_7, inline=False)
    embed.add_field(name="**:beginner: Be who you are...**", value=rule_8, inline=False)
    embed.add_field(name="**:beginner: All are equal here...**", value=rule_9, inline=False)
    embed.add_field(name="**:beginner: This server is SFW not NSFW...**", value=rule_10, inline=False)
    embed.add_field(name="**:beginner: This is no place for HACKERS...**", value=rule_11, inline=False)
    embed.add_field(name="**:beginner: Everyone got their name, Don't create any new...**", value=rule_12, inline=False)
    embed.add_field(name="**:beginner: You've got other ways to advertise...**", value=rule_13, inline=False)
    embed.add_field(name="**:beginner: Joke about any good things...**", value=rule_14, inline=False)
    embed.add_field(name="**:beginner: We can't tolerate this type of advertising...**", value=rule_15, inline=False)
    embed.add_field(name="**:beginner: Bullying is a childish act btw...**", value=rule_16, inline=False)
    embed.add_field(name="**:beginner: Don't try to avoid the punishment**", value=rule_17, inline=False)
    embed.add_field(name="**:beginner: We don't want twins here...**", value=rule_18, inline=False)
    embed.add_field(name="** \n \n Well, If you broke any rules above, You'll get warning or Instant Kick/Ban based on how severe your actions are... And the punishments decided by staff are as follows...**", value=punishments, inline=False)
    embed.add_field(name="**:beginner: Final Words...**", value="Well, The constitution will be amended frequently, If you have any concerns about rules... You can contact any staff around here...", inline=False)
    await ctx.send(embed=embed)
client.run(os.getenv('TOKEN'))
|
987,802 | a6deccc2155efe269783370f116254f68a0528a2 | from celery import shared_task
from notifications.models.task_notification import task_notification
from repository.models import UnlinkedConstituency
from .lda import (
update_commons_divisions,
update_constituencies,
update_election_results,
)
from .membersdataplatform import (
update_active_member_details,
update_all_member_details,
update_all_members_basic_info,
update_member_portraits,
update_member_portraits_wikipedia,
update_missing_member_portraits_wikipedia,
)
from .openapi import update_lords_divisions
@shared_task
@task_notification(label="Update profiles for active members")
def update_profiles_for_active_members(**kwargs):
    """Refresh constituency data and member details for active members only.

    Temporary UnlinkedConstituency placeholders are cleared first so that
    the update starts from a clean slate.
    """
    _reset_unlinked_constituencies()
    update_constituencies()
    update_all_members_basic_info()
    update_active_member_details()
@shared_task
@task_notification(label="Update profiles for all members")
def update_profiles_for_all_members(**kwargs):
    """Refresh constituency data and member details for every member.

    Same pipeline as the active-members task, but fetches details for all
    members rather than only the active ones.
    """
    _reset_unlinked_constituencies()
    update_constituencies()
    update_all_members_basic_info()
    update_all_member_details()
def _reset_unlinked_constituencies():
    """
    Clear any pre-existing UnlinkedConstituency instances. These are created while updating member data
    and are temporary placeholder objects. They should be consumed via staff dashboard actions after update completes.
    """
    UnlinkedConstituency.objects.all().delete()
@shared_task
@task_notification(label="Update Commons and Lords divisions")
def update_all_divisions(**kwargs):
    """Update division (vote) records for both chambers.

    Forwards **kwargs to both updaters so caller-supplied options apply to each.
    """
    update_commons_divisions(**kwargs)
    update_lords_divisions(**kwargs)
|
987,803 | 1a8eca607e2370e7da145cdc0842f1cec73601df | from flask import jsonify
from app.api import bp
@bp.route('/ping', methods=['GET'])
def ping():
    """Health-check endpoint: always returns the JSON string 'Pong!'."""
    return jsonify('Pong!')
@bp.route('/test-email', methods=['GET'])
def test_email():
    """Send a hard-coded test email and confirm with a JSON response.

    NOTE(review): recipient, subject and bodies are hard-coded test values;
    the imports are function-local, presumably to avoid an app-factory
    import cycle -- confirm before moving them to module level.
    """
    from flask import current_app
    from app.utils.email import send_email
    send_email('[Shui] Test Email',
               sender=current_app.config['MAIL_SENDER'],
               recipients=['164635470@qq.com'],
               text_body='text body',
               html_body='<h1>HTML body</h1>')
    return jsonify('Send Email OK!')
987,804 | 30081760b4c51bc959b0b30f5ac7e58dd480a854 | from django.shortcuts import render, redirect
from django.db.models import Sum
from django.contrib import messages
from apps.base.models import *
from apps.base.forms import *
def commercial(request, place_id=None):
    """Show the tables of a place; redirect to the first place when none given.

    NOTE(review): ``locals()`` is passed as template context, so the template
    depends on these exact local variable names (places, tables, place_id).
    """
    places = Place.objects.all()
    if not place_id and places:
        # No place selected yet: default to the first one.
        return redirect ('commercial', place_id=places[0].id)
    else:
        tables = Table.objects.filter(place=place_id)
        return render(request, 'commercial/index.html', locals())
def payer(request, table_id):
    """Payment view: unpaid orders for a table and their outstanding total.

    NOTE(review): ``a_payer`` is None (not 0) when no order has reste > 0 --
    presumably the template handles that case; confirm.
    """
    table = Table.objects.get(id=table_id)
    commandes = Commande.objects.filter(table=table_id, reste__gt=0)
    a_payer = commandes.aggregate(Sum('reste'))['reste__sum']
    return render(request, 'commercial/payement.html', locals())
def stock(request):
    """Stock overview: all places and all products (passed via locals())."""
    places = Place.objects.all()
    products = Produit.objects.all()
    return render(request, 'commercial/stock.html', locals())
def requisition(request):
    """List not-yet-validated stock movements with negative quantity (requisitions)."""
    requisitions = Stock.objects.filter(is_valid=False, quantite__lt=0)
    return render(request, 'commercial/requisition.html', locals())
def achats(request, product_id):
    """Record a stock purchase (approvisionnement) for a product.

    NOTE(review): the form is constructed with ``request.POST`` even on GET
    (an empty QueryDict), which marks it as bound; confirm that is intended.
    """
    achat_form = StockForm(product_id, request.POST)
    if request.method == "POST":
        if achat_form.is_valid():
            stock = achat_form.save(commit=False)
            # Attach the current user and the target product before saving.
            stock.personnel = request.user.personnel
            stock.produit = Produit.objects.get(id = product_id)
            stock.save()
            messages.success(request, "approvisionnement effectuée")
            # Re-render with a fresh, unbound form after a successful save.
            achat_form = StockForm(product_id)
    return render(request, 'commercial/form.html', locals())
def offre(request, product_id):
    """Create a promotional offer for a product.

    NOTE(review): as in ``achats``, the form is bound to ``request.POST``
    even for GET requests; confirm intended.
    """
    offre_form = OffreForm(request.POST)
    if request.method == "POST":
        if offre_form.is_valid():
            offre = offre_form.save(commit=False)
            offre.produit = Produit.objects.get(id = product_id)
            offre.save()
            messages.success(request, "offre ajoutée avec succes")
            # Reset to an unbound form after a successful save.
            offre_form = OffreForm()
    return render(request, 'commercial/form.html', locals())
def details(request, product_id):
    """Show all stock movements for one product."""
    stocks = Stock.objects.filter(produit = product_id)
    return render(request, 'commercial/details.html', locals())
def tables(request):
    """Placeholder view: renders the 404 page (feature not implemented yet)."""
    return render(request, '404.html', locals())
|
987,805 | 9f81a58231c209d70053ffa3506890a721e15819 | from django.db import models
from django.utils.timezone import now
from django.contrib.auth.models import User
# Multiselectfield
from multiselectfield import MultiSelectField
# ckEditor
from ckeditor.fields import RichTextField
class Colaborador(models.Model):
    """Forum collaborator ("Compañero") identified only by a display name."""
    nombre = models.CharField(
        max_length=200,
        verbose_name='Nombre',
    )
    # Creation date; set once when the row is inserted.
    created = models.DateField(
        auto_now_add=True
    )
    # Last-modified timestamp; refreshed on every save.
    updated = models.DateTimeField(
        auto_now=True
    )
    class Meta():
        verbose_name = "Compañero"
        verbose_name_plural = 'Compañeros del Foro'
        # Oldest collaborators first.
        ordering = ['created']
    def __str__(self):
        return str(self.nombre)
# Fecha jQuerry
class grupoForo(models.Model):
    """Forum member profile linked to a Django auth User.

    NOTE(review): class name is not PascalCase (should be ``GrupoForo``);
    renaming would require a migration, so it is only flagged here.
    """
    # Linked auth user; PROTECT prevents deleting a User that has a profile.
    nombre = models.ForeignKey(
        User,
        verbose_name='Nombre',
        blank =True,
        null=True,
        on_delete=models.PROTECT
    )
    # Job title / role shown on the profile.
    puesto = models.CharField(
        max_length=200,
        verbose_name='Puesto',
        null=True
    )
    # Rich-text biography (CKEditor).
    info = RichTextField(
        verbose_name='Información',
        max_length=8000
    )
    # Profile, banner and extra images, all uploaded under media/nodos.
    imagenPerfil = models.ImageField(
        verbose_name="Imagen de perfil",
        upload_to='nodos'
    )
    imagenBanner = models.ImageField(
        verbose_name="Imágen del Banner",
        upload_to='nodos'
    )
    imagenExtra = models.ImageField(
        verbose_name="Imágen extra",
        upload_to='nodos'
    )
    # Optional social link (name suggests Facebook -- confirm).
    linkface = models.URLField(
        verbose_name='link',
        null=True,
        blank=True
    )
    created = models.DateField( auto_now_add=True, verbose_name='Fecha de Creación')
    updated = models.DateTimeField(auto_now=True, verbose_name='Fecha de Edición')
    class Meta:
        verbose_name = "Perfil"
        verbose_name_plural = 'Perfiles'
        # Newest profiles first.
        ordering = ['-created']
    def __str__(self):
        return str(self.nombre)
987,806 | 14e4973847e5bd39cefe0416b4251c6254ad8afe | import numpy as np
from tensorflow.keras.datasets import cifar100, mnist
from icecream import ic
### 가중치 저장(아주 중요!!!!!!!) / 순수하게 모델만 저장(나머지 다 주석처리) - 확장자는 무조건 .h5
'''
# 1. 데이터
(x_train, y_train), (x_test, y_test) = mnist.load_data()
ic(x_train.shape, y_train.shape)
ic(x_test.shape, y_test.shape)
# 1-2. x 데이터 전처리 - scaler:2차원에서만 가능
x_train = x_train.reshape(60000, 28 * 28) # 4차원 -> 2차원
x_test = x_test.reshape(10000, 28 * 28)
# 전처리 하기 -> scailing
print(x_train.shape, x_test.shape) # (50000, 3072)-2차원, (10000, 3072)-2차원
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler, PowerTransformer, QuantileTransformer
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train) # fit_transform 은 x_train 에서만 사용한다!!!!!!!!!!!!
x_test = scaler.transform(x_test)
x_train = x_train.reshape(60000, 28, 28, 1) # 4차원으로 다시 reshape(Conv2d 사용해야 되니까)
x_test = x_test.reshape(10000, 28, 28, 1)
# 1-3. y 데이터 전처리 -> one-hot-encoding
ic(np.unique(y_train)) # 100개
# from tensorflow.keras.utils import to_categorical # 0,1,2 값이 없어도 무조건 생성/shape유연
# y_train = to_categorical(y_train)
# y_test = to_categorical(y_test)
# print(y_train.shape, y_test.shape)
from sklearn.preprocessing import OneHotEncoder # sklearn으로 되어 있는 애들은 모두 2차원으로 해줘야 함/OneHotEncoder는 무조건 2차원으로 해줘야 함
y_train = y_train.reshape(-1,1)
y_test = y_test.reshape(-1,1)
one = OneHotEncoder()
# one.fit(y_train)
y_train = one.fit_transform(y_train).toarray() # (50000, 100)
y_test = one.transform(y_test).toarray() # (10000, 100)
# ic(y_train.shape, y_test.shape)
'''
# 2. Build the model. (Original comment said "uses GlobalAveragePooling2D",
#    but note: GlobalAveragePooling2D is imported and never used -- the
#    network flattens with Flatten() instead.)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPool2D, Dropout, GlobalAveragePooling2D
model = Sequential()
# Feature extractor: three Conv->Conv->MaxPool stages with 20% dropout,
# operating on 28x28x1 MNIST-shaped inputs.
model.add(Conv2D(128, kernel_size=(2, 2),
                    padding='valid', input_shape=(28, 28, 1), activation='relu'))
# model.add(Dropout(0, 2)) # 20% node Dropout  <- original buggy call kept for reference; fixed version below
model.add(Dropout(0.2))
model.add(Conv2D(128, (2,2), padding='same', activation='relu'))
model.add(MaxPool2D())
model.add(Conv2D(128, (2,2),padding='valid', activation='relu'))
model.add(Dropout(0.2))
model.add(Conv2D(128, (2,2), padding='same', activation='relu'))
model.add(MaxPool2D())
model.add(Conv2D(64, (2,2), padding='valid', activation='relu'))
model.add(Dropout(0.2))
model.add(Conv2D(64, (2,2), padding='same', activation='relu'))
model.add(MaxPool2D())
model.add(Flatten())
# Classifier head: dense stack ending in 10-way softmax (MNIST digit classes).
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
model.save('./_save/keras45_1_save_model.h5') # Save the model. Extension must be .h5; ./ = current working directory (the STUDY folder).
'''
# 3. 컴파일(ES), 훈련
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
from tensorflow.keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='min')
import time
start_time = time.time()
hist = model.fit(x_train, y_train, epochs=10, verbose=1, callbacks=[es], validation_split=0.2, shuffle=True, batch_size=100)
end_time = time.time() - start_time
# 4. 평가, 예측
results = model.evaluate(x_test, y_test)
print("걸린시간 :", end_time)
print('category :', results[0])
print('accuracy :', results[1])
# # 시각화
# import matplotlib.pyplot as plt
# plt.figure(figsize=(9,5))
# # 1
# plt.subplot(2, 1, 1) # 2개의 플롯을 할건데, 1행 1열을 사용하겠다는 의미
# plt.plot(hist.history['loss'], marker='.', c='red', label='loss')
# plt.plot(hist.history['val_loss'], marker='.', c='blue', label='val_loss')
# plt.grid()
# plt.title('loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(loc='upper right')
# # 2
# plt.subplot(2, 1, 2) # 2개의 플롯을 할건데, 1행 2열을 사용하겠다는 의미
# plt.plot(hist.history['acc'])
# plt.plot(hist.history['val_acc'])
# plt.grid()
# plt.title('acc')
# plt.ylabel('acc')
# plt.xlabel('epoch')
# plt.legend(['acc', 'val_acc'])
# plt.show()
걸린시간 : 98.12368416786194
category : 0.03186216577887535
accuracy : 0.991599977016449
''' |
987,807 | 951778cd1c2b5e1d8d3e95d03572b6945a0cb4d1 |
import math


def factorial_digit_sum(n: int = 100) -> int:
    """Return the sum of the decimal digits of n!.

    Generalizes the original script's hard-coded 100 into a parameter.
    The original also shadowed the builtin ``sum`` with a local name and
    hand-rolled the factorial loop; both are fixed here via ``math.factorial``
    and a generator expression.
    """
    return sum(int(digit) for digit in str(math.factorial(n)))


# Preserve the original script's behavior: print the digit sum of 100!.
print(factorial_digit_sum(100))
987,808 | 39c0aca375e5b442b09e625eb5b6a85117f82ea1 | '''
Given a sorted array with possibly duplicate elements, the task is to find indexes of first and last occurrences of an element x in the given array.
Note: If the number x is not found in the array just print '-1'.
Input:
The first line consists of an integer T i.e number of test cases. The first line of each test case contains two integers n and x. The second line contains n spaced integers.
Output:
Print index of the first and last occurrences of the number x with a space in between.
Constraints:
1<=T<=100
1<=n,a[i]<=1000
Example:
Input:
2
9 5
1 3 5 5 5 5 67 123 125
9 7
1 3 5 5 5 5 7 123 125
Output:
2 5
6 6
'''
def occur(arr, n, x, flag):
    """Binary-search an occurrence boundary of x in the sorted prefix arr[:n].

    flag == 1: index of the last element <= x (the last occurrence of x when
               present; may be -1 when every element exceeds x).
    flag == 0: index of the first element >= x (the first occurrence of x
               when present; may be n when every element is smaller).

    Replaces the hand-rolled binary search with the stdlib ``bisect``
    equivalents, which implement exactly the same boundaries:
    bisect_right(...) - 1 == last index with arr[i] <= x, and
    bisect_left(...) == first index with arr[i] >= x.
    """
    from bisect import bisect_left, bisect_right
    if flag == 1:
        return bisect_right(arr, x, 0, n) - 1
    return bisect_left(arr, x, 0, n)
# Driver: read T test cases; for each, print "first last" indexes of x in the
# sorted array, or -1 when x is absent.
for _ in range(int(input())):
    n, x = map(int, input().split())
    values = list(map(int, input().split()))
    last = occur(values, n, x, 1)
    if values[last] != x:
        print(-1)
    else:
        print(occur(values, n, x, 0), last)
987,809 | 2c9806c2002cb3555654d13cd06c128c388f114a | #!/usr/bin/env python
# coding: utf-8
# In[18]:
import pandas as pd
# Absolute Windows path to the source workbook -- machine-specific; this cell
# will only run on the author's machine unless the path is adjusted.
Location = "C:/Users/soura/OneDrive/Desktop/Data Visualization/datasets/BPS01.xlsx"
# Load the first sheet into a DataFrame and preview the first five rows
# (notebook-style: df.head() output is shown by the cell).
df = pd.read_excel(Location)
df.head()
# In[ ]:
# In[ ]:
987,810 | 90af63d4e7575cb32fa72daa500221bc853d8299 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import numpy as np
from collections import OrderedDict
__all__ = [
"wigner3j",
"get_camb_cl",
"expand_qb",
"scale_dust",
]
def blackbody(nu, ref_freq=353.0):
    """Blackbody emission ratio between frequency ``nu`` and ``ref_freq``.

    Arguments
    ---------
    nu : float or array_like
        Frequency in GHz.
    ref_freq : float
        Reference frequency in GHz (default 353).

    Returns
    -------
    Ratio B(nu, T) / B(nu_ref, T) in the x^3/(e^x - 1) form.

    Fix: the original did ``nu *= 1.0e9``, which mutates the caller's array
    in place when ``nu`` is a numpy ndarray. The GHz->Hz conversion is now
    done out of place so the input is never modified.
    """
    k = 1.38064852e-23  # Boltzmann constant [J/K]
    h = 6.626070040e-34  # Planck constant [J*s]
    T = 19.6  # emission temperature [K] -- presumably dust; TODO confirm
    nu_ref = ref_freq * 1.0e9
    # T = 2.725 #Cmb BB temp in K
    nu = nu * 1.0e9  # GHz -> Hz (new object; caller's array untouched)
    x = h * nu / k / T
    x_ref = h * nu_ref / k / T
    return x ** 3 / x_ref ** 3 * (np.exp(x_ref) - 1) / (np.exp(x) - 1)
def rj2cmb(nu_in, ccorr=True):
    """Rayleigh-Jeans -> CMB thermodynamic unit conversion factor at ``nu_in`` GHz.

    NOTE(review): ``ccorr`` is currently unused; a Planck color-correction
    lookup table was disabled in the original by moving it into the
    docstring. The limit nu_in -> 0 gives a factor of 1 (pure RJ regime).

    planck_cc_gnu = {101: 1.30575, 141: 1.6835, 220: 3.2257, 359: 14.1835}
    if ccorr:
        if np.isscalar(nu_in):
            for f, g in planck_cc_gnu.items():
                if int(nu_in) == int(f):
                    return g
    """
    boltzmann = 1.38064852e-23  # Boltzmann constant [J/K]
    planck = 6.626070040e-34    # Planck constant [J*s]
    t_cmb = 2.72548             # CMB blackbody temperature [K]
    x = planck * (nu_in * 1.0e9) / boltzmann / t_cmb
    return (np.exp(x) - 1.0) ** 2 / (x ** 2 * np.exp(x))
def scale_dust(freq0, freq1, ref_freq, beta, delta_beta=None, deriv=False):
    """
    Get the factor by which you must dividide the cross spectrum from maps of
    frequencies freq0 and freq1 to match the dust power at ref_freq given
    spectra index beta.

    If deriv is True, return the frequency scaling at the reference beta,
    and the first derivative w.r.t. beta.

    Otherwise if delta_beta is given, return the scale factor adjusted
    for a linearized offset delta_beta from the reference beta.
    """
    # Unit conversion (RJ -> CMB), greybody emission, and power-law pieces
    # of the scaling, multiplied together.
    unit_conv = rj2cmb(freq0) * rj2cmb(freq1) / rj2cmb(ref_freq) ** 2.0
    emission = blackbody(freq0, ref_freq=ref_freq) * blackbody(freq1, ref_freq=ref_freq)
    power_law = (freq0 * freq1 / ref_freq ** 2) ** (beta - 2.0)
    freq_scale = unit_conv * emission * power_law

    if deriv or delta_beta is not None:
        # d/d(beta) of the power-law term brings down log(f0*f1/fref^2).
        log_ratio = np.log(freq0 * freq1 / ref_freq ** 2)
        if deriv:
            return (freq_scale, freq_scale * log_ratio)
        return freq_scale * (1 + log_ratio * delta_beta)

    return freq_scale
def wigner3j(l2, m2, l3, m3):
    r"""
    Wigner 3j symbols computed for all valid values of ``L``, as in:

    .. math::

        \begin{pmatrix}
            \ell_2 & \ell_3 & L \\
            m_2 & m_3 & 0 \\
        \end{pmatrix}

    Arguments
    ---------
    l2, m2, l3, m3 : int
        The ell and m values for which to compute the symbols.

    Returns
    -------
    fj : array_like
        Array of size ``l2 + l3 + 2``, indexed by ``L``
    lmin : int
        The minimum value of ``L`` for which ``fj`` is non-zero.
    lmax : int
        The maximum value of ``L`` for which ``fj`` is non-zero.
    """
    import camb
    # camb moved threej between modules across releases; fall back to the
    # older location when the new one is missing.
    try:
        from camb.mathutils import threej
    except ImportError:
        from camb.bispectrum import threej
    arr = threej(l2, l3, m2, m3)

    # Triangle/selection rules determine the non-zero L range.
    lmin = np.max([np.abs(l2 - l3), np.abs(m2 + m3)])
    lmax = l2 + l3
    # Pad into a dense array indexed directly by L (length lmax + 2).
    fj = np.zeros(lmax + 2, dtype=arr.dtype)
    fj[lmin : lmax + 1] = arr
    return fj, lmin, lmax
def get_camb_cl(r, lmax, nt=None, spec="total", lfac=True):
    """
    Compute camb spectrum with tensors and lensing.

    Parameter values are from arXiv:1807.06209 Table 1 Plik best fit

    Arguments
    ---------
    r : float
        Tensor-to-scalar ratio
    lmax : int
        Maximum ell for which to compute spectra
    nt : scalar, optional
        Tensor spectral index.  If not supplied, assumes
        slow-roll consistency relation.
    spec : string, optional
        Spectrum component to return.  Can be 'total', 'unlensed_total',
        'unlensed_scalar', 'lensed_scalar', 'tensor', 'lens_potential'.
    lfac: bool, optional
        If True, multiply Cls by ell*(ell+1)/2/pi

    Returns
    -------
    cls : array_like
        Array of spectra of shape (lmax + 1, nspec).
        Diagonal ordering (TT, EE, BB, TE).
    """
    # Set up a new set of parameters for CAMB
    import camb

    pars = camb.CAMBparams()
    # This function sets up CosmoMC-like settings, with one massive neutrino and
    # helium set using BBN consistency
    pars.set_cosmology(
        H0=67.32,
        ombh2=0.022383,
        omch2=0.12011,
        mnu=0.06,
        omk=0,
        tau=0.0543,
    )
    # Primordial power spectrum amplitude ln(10^10 As) and tilt ns from the
    # same Planck 2018 best fit; r and nt are caller-supplied.
    ln1010As = 3.0448
    pars.InitPower.set_params(As=np.exp(ln1010As) / 1.0e10, ns=0.96605, r=r, nt=nt)
    if lmax < 2500:
        # This results in unacceptable bias. Use higher lmax, then cut it down
        lmax0 = 2500
    else:
        lmax0 = lmax
    pars.set_for_lmax(lmax0, lens_potential_accuracy=2)
    pars.WantTensors = True
    pars.do_lensing = True

    # calculate results for these parameters
    results = camb.get_results(pars)
    powers = results.get_cmb_power_spectra(pars, CMB_unit="muK", raw_cl=not lfac)
    # Keep only TT, EE, BB, TE up to the requested lmax, transposed to
    # (nspec, lmax + 1).
    totCL = powers[spec][: lmax + 1, :4].T
    return totCL
def expand_qb(qb, bin_def, lmax=None):
    """
    Expand a qb-type array to an ell-by-ell spectrum using bin_def.

    Arguments
    ---------
    qb : array_like, (nbins,)
        Array of bandpower deviations
    bin_def : array_like, (nbins, 2)
        Array of bin edges for each bin
    lmax : int, optional
        If supplied, limit the output spectrum to this value.
        Otherwise the output spectrum extends to include the last bin.

    Returns
    -------
    cl : array_like, (lmax + 1,)
        Array of expanded bandpowers
    """
    if lmax is None:
        lmax = bin_def.max() - 1

    cl = np.zeros(lmax + 1)
    # Paint each bandpower over its [start, stop) ell range; slicing clips
    # bins that extend past lmax.
    for band_index, (ell_start, ell_stop) in enumerate(bin_def):
        cl[ell_start:ell_stop] = qb[band_index]
    return cl
|
987,811 | 5550ec5b1a6fc585e0eaed942dca613167475758 | """Class definition for fixing image rotation."""
from kaishi.core.pipeline_component import PipelineComponent
from kaishi.image.labelers.generic_convnet import LabelerGenericConvnet
class TransformFixRotation(PipelineComponent):
    """Rectify image rotations using pre-determined labels (labels the dataset
    with the default convnet first when it has not been labeled yet)."""

    def __init__(self):
        """Initialize new transform component."""
        super().__init__()

    def __call__(self, dataset):
        """Rotate each labeled image back to upright and mark it RECTIFIED.

        :param dataset: image dataset to perform operation on
        :type dataset: :class:`kaishi.image.dataset.ImageDataset`
        """
        if not dataset.labeled:
            LabelerGenericConvnet()(dataset)
            dataset.labeled = True

        # Map each rotation label to the corrective rotation in degrees.
        rotation_fixes = (
            ("ROTATED_RIGHT", 90),
            ("ROTATED_LEFT", 270),
            ("UPSIDE_DOWN", 180),
        )
        for fobj in dataset.files:
            if fobj.image is None or fobj.has_label("RECTIFIED"):
                continue
            for label, degrees in rotation_fixes:
                if fobj.has_label(label):
                    fobj.rotate(degrees)
                    fobj.remove_label(label)
                    break
            fobj.add_label("RECTIFIED")
987,812 | ea707647b3d7dacccb9f262689218eecb7e8a0e3 | import functools
import sys
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from werkzeug.security import check_password_hash, generate_password_hash
from flaskr.db import get_db
bp = Blueprint('group', __name__, url_prefix='/group')
@bp.route('/create', methods=('GET', 'POST'))
def createGroup():
    """Validate a username to add to a group; respond with JSON on success.

    POST expects a JSON body with ``usernameToAdd``; the name must be
    non-empty and already registered. On success returns ``{"success": true}``;
    otherwise the error is flashed.

    NOTE(review): both GET and the POST error path render
    ``auth/register.html`` -- looks copied from the register view; confirm
    the intended template. The ``print`` below is a debug leftover.
    """
    print('Hello from python', flush=True)
    if request.method == 'POST':
        usernameToAdd = request.json['usernameToAdd']
        db = get_db()
        error = None

        if not usernameToAdd:
            error = 'Username to be added is required.'
        elif db.execute(
            'SELECT id FROM user WHERE username = ?', (usernameToAdd,)
        ).fetchone() is None:
            error = 'User {} is not registered.'.format(usernameToAdd)

        if error is None:
            return {"success":True}

        flash(error)

    return render_template('auth/register.html')
|
987,813 | 0795a97993e9410995c25cf077339f43b1af93c2 | import numpy as np
import random
import os
import pickle
from matplotlib import pyplot as plt
class Player:
    """Abstract base class for tic-tac-toe players.

    Subclasses override :meth:`move`; the board mark (+1 or -1) is assigned
    after construction via :meth:`set_value`.
    """

    def __init__(self, name):
        self.name = name
        # Board mark (+1 or -1); assigned later via set_value.
        self.value = None

    def move(self, board):
        """Return the (row, col) to play. Subclasses must override.

        Fix: raises ``NotImplementedError`` instead of a bare ``Exception``;
        it is still an ``Exception`` subclass, so existing callers catching
        ``Exception`` are unaffected.
        """
        raise NotImplementedError

    def set_value(self, value):
        """Assign this player's board mark (+1 or -1)."""
        self.value = value

    def update_final_board(self, board, board_prev, value):
        """End-of-game hook for learning players; default is a no-op."""
        pass
class RandomPlayer(Player):
    """Player that picks an empty cell uniformly at random (rejection sampling)."""

    def __init__(self, name):
        Player.__init__(self, name)

    def move(self, board):
        # Keep drawing random coordinates until an empty cell turns up.
        while True:
            row = random.randint(0, 2)
            col = random.randint(0, 2)
            if board[row, col] == 0:
                return (row, col)
class DefencePlayer(Player):
    '''
    Defensive player: blocks the opponent ("opened" -- presumably a typo for
    "opponent") whenever they are one move away from winning; otherwise plays
    a random empty cell. The scan order (rows, then columns, then diagonals)
    determines which threat is blocked first.
    '''
    def __init__(self, name):
        Player.__init__(self, name)
    def get_empty_cell_row(self, row, board):
        # Column index of the first empty cell in `row` (None if the row is full).
        for j in range(3):
            if board[row, j] == 0:
                return j
    def get_empty_cell_column(self, column, board):
        # Row index of the first empty cell in `column` (None if the column is full).
        for i in range(3):
            if board[i, column] == 0:
                return i
    def move(self, board):
        # Opponent's mark is the opposite sign of ours.
        if self.value == -1:
            self.opened_value = 1
        else:
            self.opened_value = -1
        # check rows: a line summing to 2*opponent has two marks + one empty cell.
        for i in range(3):
            if np.sum(board[i, :]) == self.opened_value * 2:
                empty_cell_row = self.get_empty_cell_row(i, board)
                if board[i, empty_cell_row] == 0:
                    return i, empty_cell_row
        # check columns
        for j in range(3):
            if np.sum(board[:, j]) == self.opened_value * 2:
                empty_cell_column = self.get_empty_cell_column(j, board)
                if board[empty_cell_column, j] == 0:
                    return empty_cell_column, j
        # check diagonals
        if board[0, 0] + board[1, 1] + board[2, 2] == self.opened_value * 2:
            for i in range(3):
                if board[i, i] == 0:
                    return i, i
        if board[2, 0] + board[1, 1] + board[0, 2] == self.opened_value * 2:
            if board[2, 0] == 0:
                return 2, 0
            if board[1, 1] == 0:
                return 1, 1
            if board[0, 2] == 0:
                return 0, 2
        # No immediate threat: fall back to a random empty cell.
        while True:
            x = random.randint(0, 2)
            y = random.randint(0, 2)
            if board[x, y] == 0:
                return (x, y)
class OffensivePlayer(Player):
    '''
    Offensive player: completes its own winning line whenever it is one move
    away from winning; otherwise plays a random empty cell. Scan order
    (rows, columns, diagonals) decides which winning move is taken first.
    '''
    def __init__(self, name):
        Player.__init__(self, name)
    def get_empty_cell_row(self, row, board):
        # Column index of the first empty cell in `row` (None if the row is full).
        for j in range(3):
            if board[row, j] == 0:
                return j
    def get_empty_cell_column(self, column, board):
        # Row index of the first empty cell in `column` (None if the column is full).
        for i in range(3):
            if board[i, column] == 0:
                return i
    def move(self, board):
        # check rows: a line summing to 2*our mark has two of ours + one empty.
        for i in range(3):
            if np.sum(board[i, :]) == self.value * 2:
                empty_cell_row = self.get_empty_cell_row(i, board)
                if board[i, empty_cell_row] == 0:
                    return i, empty_cell_row
        # check columns
        for j in range(3):
            if np.sum(board[:, j]) == self.value * 2:
                empty_cell_column = self.get_empty_cell_column(j, board)
                if board[empty_cell_column, j] == 0:
                    return empty_cell_column, j
        # check diagonals
        if board[0, 0] + board[1, 1] + board[2, 2] == self.value * 2:
            for i in range(3):
                if board[i, i] == 0:
                    return i, i
        if board[2, 0] + board[1, 1] + board[0, 2] == self.value * 2:
            if board[2, 0] == 0:
                return 2, 0
            if board[1, 1] == 0:
                return 1, 1
            if board[0, 2] == 0:
                return 0, 2
        # No winning move available: fall back to a random empty cell.
        while True:
            x = random.randint(0, 2)
            y = random.randint(0, 2)
            if board[x, y] == 0:
                return (x, y)
class OffensiveDefensivePlayer(Player):
    '''
    Combined strategy: first try to complete its own winning line, then try
    to block the opponent's winning line, otherwise play a random empty cell.
    '''
    def __init__(self, name):
        Player.__init__(self, name)
    def get_empty_cell_row(self, row, board):
        # Column index of the first empty cell in `row` (None if the row is full).
        for j in range(3):
            if board[row, j] == 0:
                return j
    def get_empty_cell_column(self, column, board):
        # Row index of the first empty cell in `column` (None if the column is full).
        for i in range(3):
            if board[i, column] == 0:
                return i
    def move(self, board):
        # Opponent's mark is the opposite sign of ours.
        if self.value == -1:
            self.opened_value = 1
        else:
            self.opened_value = -1
        # --- Offense: win if one move away ---
        # check rows
        for i in range(3):
            if np.sum(board[i, :]) == self.value * 2:
                empty_cell_row = self.get_empty_cell_row(i, board)
                if board[i, empty_cell_row] == 0:
                    return i, empty_cell_row
        # check columns
        for j in range(3):
            if np.sum(board[:, j]) == self.value * 2:
                empty_cell_column = self.get_empty_cell_column(j, board)
                if board[empty_cell_column, j] == 0:
                    return empty_cell_column, j
        # check diagonals
        if board[0, 0] + board[1, 1] + board[2, 2] == self.value * 2:
            for i in range(3):
                if board[i, i] == 0:
                    return i, i
        if board[2, 0] + board[1, 1] + board[0, 2] == self.value * 2:
            if board[2, 0] == 0:
                return 2, 0
            if board[1, 1] == 0:
                return 1, 1
            if board[0, 2] == 0:
                return 0, 2
        # --- Defense: block the opponent if they are one move away ---
        # check rows defensive
        for i in range(3):
            if np.sum(board[i, :]) == self.opened_value * 2:
                empty_cell_row = self.get_empty_cell_row(i, board)
                if board[i, empty_cell_row] == 0:
                    return i, empty_cell_row
        # check columns defensive
        for j in range(3):
            if np.sum(board[:, j]) == self.opened_value * 2:
                empty_cell_column = self.get_empty_cell_column(j, board)
                if board[empty_cell_column, j] == 0:
                    return empty_cell_column, j
        # check diagonals defensive
        if board[0, 0] + board[1, 1] + board[2, 2] == self.opened_value * 2:
            for i in range(3):
                if board[i, i] == 0:
                    return i, i
        if board[2, 0] + board[1, 1] + board[0, 2] == self.opened_value * 2:
            if board[2, 0] == 0:
                return 2, 0
            if board[1, 1] == 0:
                return 1, 1
            if board[0, 2] == 0:
                return 0, 2
        # Neither win nor block available: random empty cell.
        while True:
            x = random.randint(0, 2)
            y = random.randint(0, 2)
            if board[x, y] == 0:
                return (x, y)
class ProbabilityPlayer(Player):
    '''
    Monte-Carlo player: for each candidate move, simulates N random playouts
    and picks the move with the highest estimated win probability.
    '''
    def __init__(self, name):
        Player.__init__(self, name)
        # Number of random playouts per candidate cell.
        self.N = 10
    def check_player_won(self, player, board):
        # `player` is the target line sum (3 or -3: three identical marks).
        for i in range(3):
            if np.sum(board[:, i]) == player:
                return True
            if np.sum(board[i, :]) == player:
                return True
        if board[0, 0] + board[1, 1] + board[2, 2] == player:
            return True
        if board[2, 0] + board[1, 1] + board[0, 2] == player:
            return True
        return False
    def check_game(self, board):
        # Returns +1 / -1 for a decided game, 0 otherwise (ongoing or draw).
        if self.check_player_won(3, board):
            return 1
        if self.check_player_won(-3, board):
            return -1
        return 0
    def set_move(self, board, move, value):
        # Place `value` at `move`; an occupied target indicates a logic error.
        if board[move[0], move[1]] == 0:
            board[move[0], move[1]] = value
        else:
            raise Exception
    def simulate_game(self, board):
        """Play the rest of the game randomly (opponent moves first) and
        return the winner's mark, or 0 for a draw."""
        player = RandomPlayer('rand1')
        player.set_value(self.value)
        opened = RandomPlayer('rand2')
        opened.set_value(self.value * -1)
        game_status = self.check_game(board)
        if game_status == self.value:
            return self.value
        if game_status == self.value * -1:
            return self.value * -1
        if len(np.argwhere(board == 0)) == 0:
            return 0
        while True:
            # Opponent moves first because our candidate move is already placed.
            pos = opened.move(board)
            self.set_move(board, pos, self.value * -1)
            game_status = self.check_game(board)
            if game_status == self.value:
                return self.value
            if game_status == self.value * -1:
                return self.value * -1
            if len(np.argwhere(board == 0)) == 0:
                return 0
            pos = player.move(board)
            self.set_move(board, pos, self.value)
            game_status = self.check_game(board)
            if game_status == self.value:
                return self.value
            if game_status == self.value * -1:
                return self.value * -1
            if len(np.argwhere(board == 0)) == 0:
                return 0
    def get_probability(self, board):
        """Estimate the win probability of `board` from N random playouts."""
        win = loss = draw = 0
        for i in range(self.N):
            result = self.simulate_game(board.copy())
            if result == self.value:
                win += 1
            if result == self.value * -1:
                loss += 1
            if result == 0:
                draw += 1
        # print('win {}'.format(win/self.N))
        # print('loss {}'.format(loss/self.N))
        # print('draw {}'.format(draw/self.N))
        # print('sum {}'.format(win/self.N+loss/self.N+draw/self.N))
        if win == self.N:
            return 1
        return win / self.N
    def get_probability_board(self, board):
        """Win-probability estimate for each empty cell (0 for occupied cells)."""
        board_probability = np.array([[0., 0, 0],
                                      [0, 0, 0],
                                      [0, 0, 0]])
        player_one = RandomPlayer('rand1')
        player_two = RandomPlayer('rand2')
        player_one.set_value(1)
        player_two.set_value(-1)
        for i in range(3):
            for j in range(3):
                if board[i, j] != 0:
                    continue
                board_to_simulate = board.copy()
                board_to_simulate[i, j] = self.value
                probability = self.get_probability(board_to_simulate)
                board_probability[i, j] = probability
        return board_probability
    def move(self, board):
        # Pick the cell with the highest simulated win probability; when all
        # estimates are ~0, fall back to a random empty cell.
        probability_board = self.get_probability_board(board)
        if np.sum(probability_board) > 0.000000001:
            return np.argwhere(probability_board == np.max(probability_board))[0]
        while True:
            x = random.randint(0, 2)
            y = random.randint(0, 2)
            if board[x, y] == 0:
                return (x, y)
class TemporalDifferenceTrainingPlayer(Player):
    '''
    implementation of temporal-difference learning method from [sutton, barto]

    Learns a state-value table V(s) while playing; boards are stored flattened
    (one row of 9 ints per seen board) with a parallel list of values.
    '''
    def __init__(self, name):
        Player.__init__(self, name)
        # table of seen boards, one flattened 3x3 board per row
        self.boards = np.ndarray(shape=[0, 9], dtype=int)
        # learned state values, parallel to self.boards
        self.values = []
        # learning rate, decayed after every finished game
        self.alpha = 0.9
        self.games = 0
        # history of alpha values, consumed by plot_alpha()
        self.alphas = [self.alpha]
    def __del__(self):
        # Persist the learned table when the object is collected.
        # NOTE(review): relying on __del__ for persistence is fragile (it may
        # run late or not at all on interpreter shutdown) - confirm intent.
        root_dir = os.getcwd()
        pickle_name = self.name + '.pickle'
        pickle_file = os.path.join(root_dir, pickle_name)
        try:
            f = open(pickle_file, 'wb')
            save = {
                'boards': self.boards,
                'values': self.values
            }
            pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
            f.close()
        except Exception as e:
            print('Unable to save data to', pickle_file, ':', e)
            raise
    def plot_alpha(self):
        # Plot the learning-rate decay over the games trained so far.
        x = range(self.games+1)
        plt.plot(x, self.alphas)
        plt.xlabel('games')
        plt.ylabel('alpha')
        plt.show()
    def update_final_board(self, board, board_prev, value):
        """End-of-game update: register the terminal board with `value` and
        back up a 0-target into the previous own board when the game was lost."""
        index = self.get_index(board)
        if index == -1:
            # store terminal boards from player +1's perspective
            if self.value == -1:
                self.add_board(board * -1, value)
            else:
                self.add_board(board, value)
        index = self.get_index(board_prev)
        # update of last step, if game is lost
        if value == 0:
            # enemy turn needs no update
            if index != -1:
                V_current = self.values[index]
                # V <- V + alpha * (0 - V): pull the losing state towards 0
                V_current = V_current + self.alpha * (0 - V_current)
                self.values[index] = V_current
        self.games += 1
        # exponential decay of the learning rate (very slow with this constant)
        self.alpha *= np.exp(-0.000000001 * self.games)
        self.alphas.append(self.alpha)
    def get_index(self, board):
        # Row index of `board` in the flattened table, or -1 when unseen.
        index = np.where((self.boards == board.reshape(9)).all(axis=1))[0]
        if len(index) == 0:
            return -1
        return index[0]
    def add_board(self, board, value):
        # Append a new (board, value) pair to the parallel structures.
        self.boards = np.concatenate((self.boards, board.reshape(-1, 9)))
        self.values.append(value)
    def check_player_won(self, value, board):
        # True if line-sum `value` (+3 or -3) fills a row, column or diagonal.
        for i in range(3):
            if np.sum(board[:, i]) == value:
                return True
            if np.sum(board[i, :]) == value:
                return True
        if board[0, 0] + board[1, 1] + board[2, 2] == value:
            return True
        if board[2, 0] + board[1, 1] + board[0, 2] == value:
            return True
        return False
    def check_game(self, board):
        """Return the stored value of `board`; unseen boards are registered with
        the neutral default 0.5, a win of player +1 yields 1."""
        if self.check_player_won(3, board):
            return 1
        index = self.get_index(board)
        if index == -1:
            self.add_board(board, 0.5)
            return 0.5
        else:
            return self.values[index]
    def get_value_board(self, board):
        # Value of every one-move successor; -1 marks occupied (unplayable) cells.
        board_probability = -1 * np.ones([3, 3])
        for i in range(3):
            for j in range(3):
                if board[i, j] != 0:
                    continue
                board_next_move = board.copy()
                board_next_move[i, j] = 1
                probability_own_move = self.check_game(board_next_move)
                board_probability[i, j] = probability_own_move
        return board_probability
    def move(self, board):
        """Epsilon-greedy move: exploit the learned values with probability 0.8
        (backing up the best successor value), otherwise explore randomly."""
        exploit = random.random()
        if exploit > 0.2:
            index = self.get_index(board)
            if index == -1:
                V_t_current = 0.5
                self.add_board(board, V_t_current)
                # NOTE(review): index stays -1 here, so the update below writes
                # self.values[-1], which is the entry just appended - confirm.
            else:
                V_t_current = self.values[index]
            # values are stored from player +1's perspective, so flip when -1
            if self.value == -1:
                value_board = self.get_value_board(board * -1)
            else:
                value_board = self.get_value_board(board)
            if np.sum(value_board) != -9:
                V_t_next = np.max(value_board)
                move = np.argwhere(value_board == np.max(value_board))[0]
                # TD(0) backup: V(s) <- V(s) + alpha * (V(s') - V(s))
                self.values[index] = V_t_current + self.alpha * (V_t_next - V_t_current)
                return move
        # explore
        else:
            while True:
                x = random.randint(0, 2)
                y = random.randint(0, 2)
                if board[x, y] == 0:
                    return (x, y)
class TemporalDifferencePlayer(Player):
    '''
    implementation of temporal-difference learning method from [sutton, barto]

    Inference-only player: plays greedily from a state-value table produced by
    a previous training run.
    '''
    def __init__(self, name):
        Player.__init__(self, name)
        # restore the learned state-value table from disk
        pickle_file = os.path.join(os.getcwd(), 'td_random.pickle')
        try:
            with open(pickle_file, 'rb') as handle:
                stored = pickle.load(handle)
                self.boards = stored['boards']
                self.values = stored['values']
                del stored
        except Exception as e:
            print('Unable to load data to', pickle_file, ':', e)
            raise
    def get_index(self, board):
        # Row index of `board` inside the flattened board table, -1 if unseen.
        matches = np.where((self.boards == board.reshape(9)).all(axis=1))[0]
        return -1 if len(matches) == 0 else matches[0]
    def get_value_board(self, board):
        """Return a 3x3 array of learned values for each one-move successor;
        occupied cells are marked -1 so they can never be chosen."""
        cell_values = -1 * np.ones([3, 3])
        for row in range(3):
            for col in range(3):
                if board[row, col] != 0:
                    continue
                candidate = board.copy()
                candidate[row, col] = 1
                table_row = self.get_index(candidate)
                # unseen positions fall back to the neutral default value 0.5
                cell_values[row, col] = 0.5 if table_row == -1 else self.values[table_row]
        return cell_values
    def move(self, board):
        """Greedy move: pick the cell whose successor has the highest value."""
        # values are stored from player +1's perspective, so flip for player -1
        value_board = self.get_value_board(board * -1 if self.value == -1 else board)
        return np.argwhere(value_board == np.max(value_board))[0]
class HumanPlayer(Player):
    """Interactive player: reads moves like '13' (row 1, column 3) from stdin."""
    def __init__(self, name):
        Player.__init__(self, name)
    def move(self, board):
        """Prompt until the user names an empty cell; return 0-based (row, col).

        Fixes: the original crashed on short, non-numeric or out-of-range input
        (e.g. '9', 'ab', '99'); it also misspelled the retry message ("vaild").
        """
        while True:
            move = input("move ")
            try:
                x = int(move[0]) - 1
                y = int(move[1]) - 1
                if not (0 <= x <= 2 and 0 <= y <= 2):
                    raise ValueError
            except (ValueError, IndexError):
                print('not valid, please retry')
                continue
            if board[x, y] == 0:
                return (x, y)
            else:
                print('not valid, please retry')
|
987,814 | ccbfb2e81893cc7d40b7b1b2b3f90cd8794094d0 | $NetBSD$
Make it recognize DragonFlyBSD
--- src/calibre/constants.py.orig 2012-04-13 04:21:01.000000000 +0000
+++ src/calibre/constants.py
@@ -28,7 +28,8 @@ isosx = 'darwin' in _plat
isnewosx = isosx and getattr(sys, 'new_app_bundle', False)
isfreebsd = 'freebsd' in _plat
isnetbsd = 'netbsd' in _plat
-isbsd = isfreebsd or isnetbsd
+isdragonflybsd = 'dragonfly' in _plat
+isbsd = isfreebsd or isnetbsd or isdragonflybsd
islinux = not(iswindows or isosx or isbsd)
isfrozen = hasattr(sys, 'frozen')
isunix = isosx or islinux
|
987,815 | e95307d1a961c61427163f09ae084fc9c5befcd4 | class AdvancedArithmetic(object):
    def divisorSum(n):
        # Abstract hook: subclasses must return the sum of all positive divisors.
        # NOTE(review): `self` is missing from the signature, so `n` receives the
        # instance on a bound call; Calculator overrides it with a proper signature.
        raise NotImplementedError
class Calculator(AdvancedArithmetic):
    """Concrete AdvancedArithmetic: sums the positive divisors of an integer."""
    def __init__(self):
        # kept for backward compatibility: holds the divisors of the last call
        self.lst = list()
    def divisorSum(self, n):
        """Return the sum of all positive divisors of n (e.g. 6 -> 1+2+3+6 = 12).

        Fix: the original appended to self.lst without clearing it, so a second
        call on the same instance summed stale divisors from earlier inputs.
        """
        self.lst = [i for i in range(1, n + 1) if n % i == 0]
        return sum(self.lst)
n = int(input())
my_calculator = Calculator()
s = my_calculator.divisorSum(n)
print("I implemented: " + type(my_calculator).__bases__[0].__name__)
print(s) |
987,816 | 56e819e42e0ffcb044e5ba5845154f16c7a93e15 | import datetime as dt
import json
import os
from datetime import datetime
import bs4
import pandas as pd
import requests
import yfinance as yf
from pandas_datareader import data as pdr
yf.pdr_override()
info_types = ["info", "options", "dividends",
"mutualfund_holders", "institutional_holders",
"major_holders", "calendar", "actions", "splits"]
def date_parse(d):
    """Parse an ISO-style 'YYYY-MM-DD' string into a datetime."""
    fmt = "%Y-%m-%d"
    return datetime.strptime(d, fmt)
def load_price_history(symbol, start_date=dt.datetime(2000, 1, 1), end_date=None, market="us",
                       reload=True):
    """Load (and incrementally update) a symbol's OHLCV history via a local CSV cache.

    With reload=True, missing date ranges are fetched from Yahoo and merged into
    data/<market>/price_history/<symbol>.csv; otherwise the CSV is read as-is.
    end_date=None (the default) means "up to now".

    Fix: the old default `end_date=dt.datetime.now()` was evaluated once at
    import time, so long-running processes silently used a stale end date.
    """
    now = dt.datetime.now()
    if end_date is None:
        end_date = now
    symbol = symbol.lower().strip()
    symbol_filename = "-".join(symbol.split("."))
    file_path = f"data/{market}/price_history/{symbol_filename}.csv"
    if reload:
        if os.path.isfile(file_path):  # download only data from one day after latest date in csv
            df_old = pd.read_csv(file_path, index_col=0, parse_dates=True)
            if len(df_old) == 0:
                df = pdr.get_data_yahoo(symbol, start_date, end_date)
                df.reset_index(level=0).to_csv(file_path, index=False, date_format="%Y-%m-%d")
                return df
            oldest_saved_date = df_old.index[0]
            lastest_saved_date = df_old.index[-1]
            try:
                if start_date < oldest_saved_date:
                    # back-fill history older than what is cached
                    df_older = pdr.get_data_yahoo(symbol, start_date, oldest_saved_date - dt.timedelta(days=1))
                    df_older = df_older[(df_older.index >= start_date) & (df_older.index < oldest_saved_date)]
                    df_old = pd.concat([df_older, df_old])
                df_old = df_old[df_old.index < lastest_saved_date]
                # refresh everything from the last cached day forward
                df_new = pdr.get_data_yahoo(symbol, lastest_saved_date, now)
                df_new = df_new[df_new.index >= lastest_saved_date]
                df_new = df_new[~df_new.index.duplicated(keep="first")]
                df = pd.concat([df_old, df_new])
                df.reset_index(level=0).to_csv(file_path, index=False, date_format="%Y-%m-%d")
                return df[(df.index >= start_date) & (df.index <= end_date)]
            except TypeError:
                # index comparison failed (e.g. unparsed dates) - redownload everything
                df = pdr.get_data_yahoo(symbol, start_date, end_date)
                df.reset_index(level=0).to_csv(file_path, index=False, date_format="%Y-%m-%d")
                return df
        else:  # no csv exists yet
            df = pdr.get_data_yahoo(symbol, start_date, end_date)
            directory = f"data/{market}/price_history"
            if not os.path.exists(directory):
                os.makedirs(directory)
            print(file_path)
            df.reset_index(level=0).to_csv(file_path, index=False, date_format="%Y-%m-%d")
            return df
    else:  # don't reload
        df = pd.read_csv(file_path, index_col=0, parse_dates=True)
        try:
            return df[
                (pd.to_datetime(df.index).floor('D') >= start_date) & (pd.to_datetime(df.index).floor('D') <= end_date)]
        except TypeError:
            df = pdr.get_data_yahoo(symbol, start_date, end_date)
            df.reset_index(level=0).to_csv(file_path, index=False, date_format="%Y-%m-%d")
            return df
def reload_all(symbols, start_date=dt.datetime(2000, 1, 1), end_date=None):
    """Re-download price history for every symbol (duplicates removed, order kept).

    Fix: `end_date=dt.datetime.now()` used to be evaluated once at import time;
    None now means "evaluate per call".
    """
    if end_date is None:
        end_date = dt.datetime.now()
    symbols = remove_duplicates(symbols)
    for symbol in symbols:
        df = load_price_history(symbol, start_date, end_date)
        print(df.index[0], df.index[-1])
def reload_sandp500():
    """Re-download price history for the cached S&P 500 symbol list."""
    symbols = load_sandp500_symbols()
    print(symbols)
    # fix: reuse the list already loaded instead of reading the file a second time
    reload_all(symbols)
def load_ticker_info(symbol, market="us", type_str="info", reload=False):
    """Return cached yfinance metadata for `symbol`, downloading it when
    reload=True or no cache file exists yet.

    type_str selects what to fetch (see `info_types`); dict-like results are
    cached as JSON, tabular results as CSV under data/<market>/info/<type_str>/.

    Fix: the download branch rebuilt the path from the raw `symbol`, dropping
    the dot->dash sanitisation (e.g. "brk.b" -> "brk-b"), so the file written
    was not the file read back later. Paths now always use `symbol_filename`.
    """
    symbol = symbol.lower().strip()
    if type_str in ["info", "isin", "options"]:
        extn = "json"
    else:
        extn = "csv"
    symbol_filename = "-".join(symbol.split("."))
    file_path = f"data/{market}/info/{type_str}/{symbol_filename}.{extn}"
    if reload or not os.path.isfile(file_path):
        print(f"Downloading {type_str} for {symbol}..")
        ticker = yf.Ticker(symbol)
        if type_str in ["info", "options"]:
            if type_str == "info":
                try:
                    info_dict = ticker.info
                except Exception:  # yfinance raises assorted errors for unknown symbols
                    print(f"Error, no info found for {symbol}.")
                    return
            else:
                info_dict = ticker.options
            with open(file_path, "w") as f:
                json.dump(info_dict, f, indent=4)
            return info_dict
        else:
            if type_str == "actions":
                info_df = ticker.actions
            elif type_str == "calendar":
                info_df = ticker.calendar
            elif type_str == "dividends":
                info_df = ticker.dividends
            elif type_str == "institutional_holders":
                info_df = ticker.institutional_holders
            elif type_str == "major_holders":
                info_df = ticker.major_holders
            elif type_str == "mutualfund_holders":
                info_df = ticker.mutualfund_holders
            elif type_str == "splits":
                info_df = ticker.splits
            else:
                print(f"Error - info type \"{type_str}\" is not a valid option.")
                return
            info_df.to_csv(file_path)
            return info_df
    else:
        if extn == "json":
            with open(file_path, "r") as f:
                return json.load(f)
        elif extn == "csv":
            return pd.read_csv(file_path)
def load_sandp500_symbols(reload=False):
    """Return the S&P 500 ticker list, lower-cased with dots mapped to dashes.

    reload=True scrapes the list from Wikipedia and rewrites the local cache
    file; otherwise the cached data/us/symbols.txt is read.
    """
    if reload:
        symbols = []
        with requests.get("https://en.wikipedia.org/wiki/List_of_S%26P_500_companies") as resp:
            soup = bs4.BeautifulSoup(resp.content, "lxml")
            for table in soup.find_all("table", {"class": "wikitable"}):
                for tr in table.find("tbody").find_all("tr"):
                    # first cell of each data row holds the ticker symbol
                    td = tr.find("td")
                    if td:
                        symbols.append("-".join(td.text.strip().split(".")).lower())
                # only the first wikitable is the constituents table
                break
        with open("data/us/symbols.txt", "w") as f:
            for symbol in symbols:
                f.write(symbol + "\n")
        return symbols
    else:
        with open("data/us/symbols.txt", "r") as f:
            return [line.strip() for line in f]
def remove_duplicates(seq):
    """Return seq's items with later duplicates dropped, keeping first-seen order."""
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def all_prices_df(market="us", reload=True):
    """Build (or read back) one wide frame with an "Adj Close" column per S&P 500 symbol.

    reload=True assembles the frame from the per-symbol CSV caches and writes
    it to data/<market>/price_history/all/all.csv; reload=False reads that file.
    """
    if reload:
        symbols = load_sandp500_symbols()
        all_df = pd.DataFrame()
        for symbol in symbols:
            # NOTE(review): assumes every per-symbol CSV already exists (reload=False)
            df = load_price_history(symbol, reload=False)[["Adj Close"]].rename({"Adj Close": symbol}, axis=1)
            all_df = all_df.join(df, how="outer")
        all_df.reset_index(level=0).to_csv(f"data/{market}/price_history/all/all.csv", index=False,
                                           date_format="%Y-%m-%d")
        return all_df
    else:
        return pd.read_csv(f"data/{market}/price_history/all/all.csv", index_col=0, parse_dates=True)
def weekly(df):
    """Downsample daily OHLCV rows to one row per week (bins labelled by their left edge)."""
    rules = {"Open": "first", "High": "max", "Low": "min",
             "Close": "last", "Adj Close": "last", "Volume": "sum"}
    return df.resample("W", label="left").agg(rules)
def monthly(df):
    """Downsample daily OHLCV rows to one row per month (bins labelled by their left edge)."""
    rules = {"Open": "first", "High": "max", "Low": "min",
             "Close": "last", "Adj Close": "last", "Volume": "sum"}
    return df.resample("M", label="left").agg(rules)
def watchlist():
    """Return the watchlist file's entries, one per line.

    Splits on '\n' exactly like the original, so a trailing newline yields a
    final empty entry.
    """
    with open("data/watchlist2.txt", "r") as f:
        contents = f.read()
    return contents.split("\n")
|
987,817 | f1262f33aa271ca66adf7247c01bb6ef4a011b96 | # !user/bin/python
# -*- coding: UTF-8 -*-
import pandas as pd
import numpy as np
df = pd.read_excel('sales_transactions.xlsx')
print(df)
# account name order sku quantity unit price ext price
# 0 383080 Will LLC 10001 B1-20000 7 33.69 235.83
# 1 383080 Will LLC 10001 S1-27722 11 21.12 232.32
# 2 383080 Will LLC 10001 B1-86481 3 35.99 107.97
# 3 412290 Jerde-Hilpert 10005 S1-06532 48 55.82 2679.36
# 4 412290 Jerde-Hilpert 10005 S1-82801 21 13.62 286.02
# 5 412290 Jerde-Hilpert 10005 S1-06532 9 92.55 832.95
# 6 412290 Jerde-Hilpert 10005 S1-47412 44 78.91 3472.04
# 7 412290 Jerde-Hilpert 10005 S1-27722 36 25.42 915.12
# 8 218895 Kulas Inc 10006 S1-27722 32 95.66 3061.12
# 9 218895 Kulas Inc 10006 B1-33087 23 22.55 518.65
# 10 218895 Kulas Inc 10006 B1-33364 3 72.30 216.90
# 11 218895 Kulas Inc 10006 B1-20000 -1 72.18 -72.18
# apply可以对多个列进行操作,返回的也可以不是标量,而是向量
# 方法一
def add(x):  # x is a whole frame (or one group of it): sum two columns element-wise
    quantity = x['quantity']
    unit_price = x['unit price']
    return quantity + unit_price
print(df.groupby('order').apply(add)) # apply的第一个形参是self,传入的是分好后的每组
# order
# 10001 0 40.69
# 1 32.12
# 2 38.99
# 10005 3 103.82
# 4 34.62
# 5 101.55
# 6 122.91
# 7 61.42
# 10006 8 127.66
# 9 45.55
# 10 75.30
# 11 71.18
# dtype: float64
print('-' * 20)
# 方法二
print(df.groupby('order').apply(lambda x : x['quantity'] + x['unit price']))
# order
# 10001 0 40.69
# 1 32.12
# 2 38.99
# 10005 3 103.82
# 4 34.62
# 5 101.55
# 6 122.91
# 7 61.42
# 10006 8 127.66
# 9 45.55
# 10 75.30
# 11 71.18
# dtype: float64
print('-' * 100)
print(df.groupby('order', group_keys = False).apply(lambda x : x['quantity'] + x['unit price']))
# 0 40.69
# 1 32.12
# 2 38.99
# 3 103.82
# 4 34.62
# 5 101.55
# 6 122.91
# 7 61.42
# 8 127.66
# 9 45.55
# 10 75.30
# 11 71.18
# dtype: float64
# 如果用apply返回标量,和agg还是有一定的区别
# apply是对每一列求标量,包括分组那一列
print(df.groupby('order').apply(np.min))
# account name order sku quantity unit price ext price
# order
# 10001 383080 Will LLC 10001 B1-20000 3 21.12 107.97
# 10005 412290 Jerde-Hilpert 10005 S1-06532 9 13.62 286.02
# 10006 218895 Kulas Inc 10006 B1-20000 -1 22.55 -72.18
# 而agg是对除了分组之外的其他所有求标量
print(df.groupby('order').aggregate(np.min))
# account name sku quantity unit price ext price
# order
# 10001 383080 Will LLC B1-20000 3 21.12 107.97
# 10005 412290 Jerde-Hilpert S1-06532 9 13.62 286.02
# 10006 218895 Kulas Inc B1-20000 -1 22.55 -72.18
# 两者都可以指定同一列求标量
print(df.groupby('order')['ext price'].apply(np.min))
# order
# 10001 107.97
# 10005 286.02
# 10006 -72.18
# Name: ext price, dtype: float64
print(df.groupby('order')['ext price'].aggregate(np.min))
# order
# 10001 107.97
# 10005 286.02
# 10006 -72.18
# Name: ext price, dtype: float64 |
987,818 | 0363750f004a8b8461cb6e02c2b660b50624400d | #!/usr/bin/env python
# -*-coding:UTF-8 -*-
import sys
import os
import pymysql # Module used to connect to a MySQL database
import smtplib # Module for using SMTP functions for sending mail via script
import time
from datetime import date
from oneIncident import envoi_oneinc #Sending function of the incident alert 80%
from oneRequest import envoi_onereq #Sending the alert function for request 80%
from secondIncident import envoi_secinc #Sending function of the incident alert 100%
from secondRequest import envoi_secreq #Sending the alert function for request 100%
today = date.today().strftime('%Y-%m-%d')  # current day, e.g. '2024-01-31'
premier_du_mois = date.today().strftime('%Y-%m-01')  # first day of the current month
incidents = 0  # resolved incidents this month
demandes = 0   # resolved requests this month

# Count the resolved tickets in the GLPI database.
connection = pymysql.connect(host='XXXXXX', user='MyUser', password='MyPassword', db='MyDB')
cur = connection.cursor()
# Resolved incidents (type=1, status=5).
# Fixes: "COUNT (*)" (space before the parenthesis) is a syntax error under
# MySQL's default sql_mode, and BETWEEN needs (low, high), so the bind order
# is now (first-of-month, today) instead of the reversed original.
# NOTE(review): "OR solutiontypes_id=2" is not parenthesised, so it escapes the
# surrounding AND conditions - confirm the intent is solutiontypes_id IN (1, 2).
cur.execute("SELECT COUNT(*) FROM glpi_tickets WHERE status=5 AND type=1 AND solutiontypes_id=1 OR solutiontypes_id=2 AND solveddate BETWEEN %s AND %s", (premier_du_mois, today))
incidents = cur.fetchone()[0]  # fix: the result was never fetched, leaving incidents at 0
# Resolved requests (type=2, status=5).
cur.execute("SELECT COUNT(*) FROM glpi_tickets WHERE status=5 AND type=2 AND solutiontypes_id=1 OR solutiontypes_id=2 AND solveddate BETWEEN %s AND %s", (premier_du_mois, today))
demandes = cur.fetchone()[0]  # fix: same for the request counter
cur.close()
connection.close()

# 80% / 100% alerting thresholds for incidents.
if incidents < 172 or incidents > 214:
    print("The message you want")
else:
    envoi_oneinc()
if incidents >= 215:
    envoi_secinc()
# 80% / 100% alerting thresholds for requests.
if demandes < 60 or demandes > 75:
    print("The message you want")
else:
    envoi_onereq()
if demandes >= 76:
    envoi_secreq()
|
987,819 | 01ee69ba160668d450314017f70365f56d667f69 | #!/usr/bin/python
import RPi.GPIO as GPIO
import sys
LED_RED = 7 # 3 color led on POE HAT
LED_GREEN = 22 # 3 color led on POE HAT
LED_BLUE = 9 # 3 color led on POE HAT
LED_RJ45 = 25 # second green led on rj45 connector
def initialize():
    """Select BCM pin numbering and switch all four LED pins to output mode."""
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    for pin in (LED_RED, LED_GREEN, LED_BLUE, LED_RJ45):
        GPIO.setup(pin, GPIO.OUT)
def led(value):
    """Drive the LEDs from a 4-bit mask: bit3=RJ45, bit2=red, bit1=green, bit0=blue."""
    for pin, mask in ((LED_RJ45, 8), (LED_RED, 4), (LED_GREEN, 2), (LED_BLUE, 1)):
        GPIO.output(pin, value & mask != 0)
def main():
    """Initialise the GPIO pins and set the LEDs from the first CLI argument (default 0)."""
    initialize()
    value = 0 if len(sys.argv) == 1 else int(sys.argv[1])
    led(value)
if __name__ == "__main__":
main()
|
987,820 | c4b8881ffc12212bc6962350da93f52eb04fd762 | """ Wraptor
Provides a set of useful decorators and other wrap-like python utility functions
"""
__version__ = "1.0.0"
|
987,821 | 85177214ee1d0043aeea6238d4dd18bfc52d0d61 | import base64
import json
import socket
from Helpers.Helper import send_command
from collections import deque
from Helpers.StoppableThread import StoppableThread
from Helpers.Helper import socketmanager
from Models.Node import Node
from RoutingTable import RoutingTable
from Helpers.Helper import xor
from Helpers.NodeExceptions import NodeNotFoundException
class Client(StoppableThread):
    """Kademlia-style client thread: iteratively looks up nodes by id and
    forwards queued commands to the node they address."""
    def __init__(self, node: Node, routing_table: RoutingTable, command_queue: deque, k: int, alpha: int):
        StoppableThread.__init__(self)
        self.node = node
        self.routing_table = routing_table
        self.k = k          # max nodes contacted in the widened fallback round
        self.alpha = alpha  # lookup concurrency / fan-out factor
        self.command_queue = command_queue
        self.queried_nodes = []   # nodes already contacted during the current lookup
        self.min_distance = None  # best XOR distance seen during the current lookup
        print(f"Client has started.\n"
              f"id: {self.node.id}\n"
              f"ip: {self.node.ip}\n"
              f"port: {self.node.port}")
    def run(self):
        # Self-lookup: if another node answers with our id, the id is taken.
        discovered_node = self.find_node(self.node.id)
        if discovered_node.ip != self.node.ip or discovered_node.port != self.node.port:
            raise Exception("This id is already taken")
        # NOTE(review): busy-waits when the queue is empty - consider a blocking queue.
        while True:
            if self.stopped():
                return
            if len(self.command_queue) > 0:
                cmd = self.command_queue.pop()
                self.handle_command(cmd)
    def _send_find_node(self, node: Node, node_to_search_id: str) -> list:
        """Issue a FIND_NODE RPC to `node`; return the nodes it answers with."""
        with socketmanager(socket.AF_INET, socket.SOCK_STREAM) as s:
            print(f"Connecting to {node.ip}:{node.port} (FIND_NODE)")
            s.connect((node.ip, node.port))
            s.send(bytes(f"{self.node.id}:{self.node.port} FIND_NODE {node_to_search_id}", encoding="utf-8"))
            res = s.recv(1024)
            lst = []
            for str_node in json.loads(res.decode(encoding='utf-8')):
                lst.append(Node(None, None, None).init_from_dict(str_node))
            return lst
    def find_node(self, node_to_search_id: str, found_nodes: list = None) -> Node:
        """Iteratively search for `node_to_search_id`, starting from the routing
        table and recursing on FIND_NODE answers.

        Raises NodeNotFoundException once every reachable node has been queried
        without a hit.
        """
        if found_nodes is None:
            found_nodes = self.routing_table.get_closest_nodes(node_to_search_id, self.alpha)
        else:
            for node in found_nodes:
                self.routing_table.add_node(node)
        found_node = [node for node in found_nodes if node.id == node_to_search_id]
        if len(found_node) != 0:  # fix: was "len(...) is not 0", an identity test on an int
            # lookup finished: reset the per-lookup state before returning
            self.queried_nodes = []
            self.min_distance = None
            return found_node[0]
        good_nodes = self.get_closest_found_nodes(found_nodes, node_to_search_id)
        if len(good_nodes) == 0:
            # no progress on XOR distance: widen to any node not queried yet
            nodes_to_request = self.get_not_requested_nodes(found_nodes)
            if len(nodes_to_request) == 0:
                self.queried_nodes = []
                self.min_distance = None
                raise NodeNotFoundException(f"Can't find node with id {node_to_search_id}")
            return self.send_find_requests(nodes_to_request[:self.k], node_to_search_id)
        return self.send_find_requests(good_nodes[:self.alpha], node_to_search_id)
    def send_find_requests(self, nodes, node_to_search_id):
        """Query `nodes` with FIND_NODE and recurse on the answers.

        NOTE(review): the unconditional return stops the loop after the first
        node - confirm whether all of `nodes` were meant to be queried.
        """
        for node in nodes:
            new_nodes = self._send_find_node(node, node_to_search_id)
            self.queried_nodes.append(node)
            return self.find_node(node_to_search_id, new_nodes)
    def get_closest_found_nodes(self, found_nodes: list, node_to_search_id):
        """Return the nodes that improve on the best XOR distance seen so far,
        updating self.min_distance along the way."""
        good_nodes = []
        for node in found_nodes:
            if self.min_distance is None or xor(node.id, node_to_search_id) < self.min_distance:
                self.min_distance = xor(node.id, node_to_search_id)
                good_nodes.append(node)
        return good_nodes
    def get_not_requested_nodes(self, nodes):
        """Return the subset of `nodes` not yet queried during this lookup."""
        n = []
        for node in nodes:
            if len([x for x in self.queried_nodes if x.id == node.id]) == 0:
                n.append(node)
        return n
    def handle_command(self, cmd: str):
        """Resolve the target of "<node_id> <command> <content>" and forward the
        command with base64-encoded content.

        NOTE(review): splits on single spaces, so `content` must not contain
        spaces - confirm against the producers of command_queue.
        """
        node_id, command, content = cmd.split(' ')
        node = self.find_node(node_id)
        b64_content = base64.b64encode(bytes(content, encoding="utf-8"))
        send_command(self.node, node, command, b64_content.decode("utf-8"))
|
987,822 | 72131b42ca90e10b9d1b39f8188b642db763bd3c | import pymssql
import pandas_datareader.data as pd
import datetime
today = datetime.datetime.now().strftime('%Y%m%d')
yesterday = (datetime.datetime.now() + datetime.timedelta(days=-8)).strftime('%Y%m%d')
def shcode_load(db_adr, id, pw, db_name):
    """Load stock codes from the t8430 table into the global `shcode_list`.

    Falls back to 'localhost' when db_adr is empty. On a DB error the
    connection is closed (when it was opened) and a warning is printed.

    Fixes: `conn` is now initialised before the try block, so a failed connect
    no longer raises a secondary UnboundLocalError in the except path; the bare
    `except:` no longer swallows KeyboardInterrupt/SystemExit.
    """
    global shcode_list
    shcode_list = []
    if db_adr == '':
        db_adr = 'localhost'
    conn = None
    try:
        conn = pymssql.connect(host=db_adr, user=id, password=pw, database=db_name, charset='utf8', as_dict=True)
        cur = conn.cursor()
        sql01 = 'select top 2 shcode from t8430 order by 1'
        cur.execute(sql01)
        for row in cur:
            shcode_list = shcode_list + list(row.values())
        conn.close()
        return shcode_list
    except Exception:
        if conn is not None:
            conn.close()
        print('DB접속정보 확인요망')
# print(shcode_load('localhost','jesus','3477','test'))
#################################
# def pandas_load(start,end):
shcode_load('localhost','jesus','3477','test')
start = ''
end = ''
if(start == ''):
start = yesterday
if(end == ''):
end = today
## for 문 시작
pd_data = ()
for i in range(len(shcode_list)):
pd_data = pd.DataReader("KRX:" + shcode_list[i], "google", start, end)
date = list(pd_data.index.strftime('%Y%m%d'))
open = list(pd_data['Open'])
high = list(pd_data['High'])
low = list(pd_data['Low'])
close = list(pd_data['Close'])
vol = list(pd_data['Volume'])
pd_list = [date,open, high, low, close, vol]
# for j in range(len(date)):
# date[j]
print(shcode_list[0])
print(pd_list)
test = []
test2 = []
for a in range(len(date)):
for b in range(6): #date ~ vol 까지 컬럼
test.append(pd_list[b][a])
test2 = test2,test
# print(pd_list[0][0])
print(test)
print(test2)
# return x
# print(pd_data.index)
#
# target_list = []
#
# target_list.append(date[0])
# target_list.append(high[0])
#
# print(date[0])
# + open[0] + high[0] + low[0] + close[0] + vol[0])
#
# print(target_list)
#
# print(data.keys())
# print(data.Open.values())
|
987,823 | 725ffad5211dfa3097fa15b20f88e8f757ef7aec | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# local_settings.py
'''
@author: friend
'''
import numpy, sys
# pickle,
import __init__
__init__.setlocalpythonpath()
import local_settings as ls
from maths.tils import TilR
# from models.Files.ReadConcursosHistory import ConcursosHistoryPickledStorage
from generators.GeradorIter import Gerador
import libfunctions.filters.filter_functions_dependent as filter_fd
from libfunctions.pattern_string_et_al.stringpatterns_functions import convert_intlist_to_spaced_zfillstr
def generate_all_combinations_against_excluding_tilrpatterns():
    """Write every generated game whose tilr-wpattern is NOT in the excluding
    list to a text blob, one zero-filled dezena line per accepted game, and
    append a final summary line with the pass/total counters. (Python 2 code.)"""
    # the excluding list was constructed with tilrpatterns occurring less than 4 times, there are 97 excluding wpatterns in a total of 180
    excluding_tilrwpatterns = ['00006', '00060', '00600', '06000', '60000', '00015', '00051', '00105', '00150', '00501', '00510', '01005', '01050', '01500', '05001', '05010', '05100', '10005', '10050', '10500', '15000', '50001', '50010', '50100', '51000', '00024', '00042', '00204', '00240', '00402', '00420', '02004', '02040', '02400', '04002', '04020', '04200', '20004', '20040', '20400', '24000', '40002', '40020', '40200', '42000', '00114', '00141', '00411', '01014', '01041', '01401', '01410', '04011', '04101', '04110', '10014', '10104', '10140', '10401', '10410', '11004', '11040', '11400', '14001', '14010', '14100', '40011', '40101', '41001', '41010', '41100', '00033', '00303', '00330', '03003', '03030', '03300', '30003', '30030', '30300', '33000', '00132', '00213', '00321', '02031', '02103', '02301', '02310', '03012', '10203', '10302', '13002', '13020', '20310', '23001', '32100', '00222']
    n_slots=5; soma=6
    # collect the excluding patterns into one reusable TilStats instance
    tilstats_reused_for_excluding_wpatterns = TilR.TilStats(n_slots, soma)
    for wpattern in excluding_tilrwpatterns:
        tilstats_reused_for_excluding_wpatterns.add_pattern_as_str(wpattern)
    #slider = ConcursoExt()
    #n_last_concurso = slider.get_n_last_concurso()
    filename = ls.GENERATED_DATA_DIR + 'all_combinations_against_excluding_tilrpatterns.blob'
    fileobj = open(filename, 'w')
    #pickler = pickle.Pickler(fileobj, pickle.HIGHEST_PROTOCOL)
    gerador = Gerador()
    n_passed = 0
    print 'Processing', len(gerador), 'games, please wait.'
    for jogo_as_dezenas in gerador:
        # keep the game only if none of its tilr-wpatterns is in the excluding set
        bool_result = filter_fd.filter_in_those_not_having_tilrwpatterns(jogo_as_dezenas, tilstats_reused_for_excluding_wpatterns, history_nDoConc_range=None)
        if bool_result:
            np_dezenas = numpy.array(jogo_as_dezenas)
            # pickle.dump(np_dezenas)
            dezenas_zfill2 = convert_intlist_to_spaced_zfillstr(np_dezenas)
            output_line = dezenas_zfill2 + '\n'
            fileobj.write(output_line)
            n_passed += 1
        all_index = gerador.iterator.session_index
        diff = all_index - n_passed
        # NOTE(review): dezenas_zfill2 is unbound here if the very first game is
        # filtered out - confirm the generator always passes at least one game first.
        print bool_result, dezenas_zfill2, 'n_passed=%d, all_index=%d, diff=%d' %(n_passed, all_index, diff)
    output_line = 'n_passed=%d, all_index=%d, diff=%d' %(n_passed, all_index, diff)
    print output_line
    fileobj.write(output_line)
    fileobj.close()
def process():
    '''
    Run the full generation pass (invoked via the '-p' CLI flag).
    '''
    generate_all_combinations_against_excluding_tilrpatterns()
    pass
def adhoc_test():
    '''
    Placeholder for quick manual experiments (invoked via the '-t' CLI flag).
    '''
    pass
import unittest
class MyTest(unittest.TestCase):
    # Placeholder test case, executed under the '-u' CLI flag.
    def test_1(self):
        pass
def look_up_cli_params_for_tests_or_processing():
    # Dispatch on CLI switches: -t adhoc test, -u unittest run, -p full processing.
    for arg in sys.argv:
        if arg.startswith('-t'):
            adhoc_test()
        elif arg.startswith('-u'):
            # unittest complains if argument is available, so remove it from sys.argv
            del sys.argv[1]
            unittest.main()
        elif arg.startswith('-p'):
            process()
if __name__ == '__main__':
look_up_cli_params_for_tests_or_processing()
|
987,824 | 40860a449a211edccd07a8d77e8ab9a50eee78a5 | from RPi import GPIO
import httplib
import urllib
import time
from time import sleep
key=" " #Enter things speak key
begin=17
end=27
distance=10.0
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(begin,GPIO.IN)
GPIO.setup(end,GPIO.IN)
stime,etime=0,0
count=0
while(True):
print(".")
while(True):
if GPIO.input(begin)==GPIO.LOW:
stime=time.time()
break
while(True):
if GPIO.input(end)==GPIO.LOW:
etime=time.time()
speed=int(distance/(etime-stime))
count=count+1
break
temp=int(open('/sys/class/thermal/thermal_zone0/temp').read())/1e3
params=urllib.urlencode({'field1':temp,'field2':speed,'field3':count,'key':key})
headers={"content-typZZe":"application/x-www-form-urlencoded","Accept":"text/plain"}
conn=httplib.HTTPConnection("api.thingspeak.com:80")
try:
conn.request("POST","/update",params,headers)
response=conn.getresponse()
print str(temp)+" `C"
print str(speed)+" Km/hr"
print count
conn.close()
except:
print "Oops! Connection failed"
sleep(20)
|
987,825 | 7b1cf62c9b6006e3e519075e847d7015927074b6 | import cv2
import scipy.io as scio
import numpy as np
class img_operator:
    """Small helper around OpenCV/scipy for loading images and BSDS ground-truth .mat files."""
    def __init__(self):
        # default test image
        self.path = '../image/1.png'
    def print(self, name, gray):
        # pretty-print an array together with its name (shadows the builtin on purpose)
        print(name + '=\n{}\n'.format(gray))
    def qu(self, gray, rc):
        """Crop `gray` to the inclusive window rc = (row_min, row_max, col_min, col_max)."""
        return gray[rc[0]:rc[1] + 1, rc[2]:rc[3] + 1]
    def readImg(self, path):
        """Read an image file as a grayscale numpy array."""
        gray = cv2.imread(path, 0)
        gray = np.array(gray)
        return gray
    def showImg(self, title, gray, is_wait = False):
        """Display an image; block for a key press when is_wait is True."""
        cv2.imshow(title, gray)
        if is_wait:
            cv2.waitKey(0)
    def showMat(self, mat):
        print('mat=\n{}'.format(mat))
    # crop a small test patch from the default image
    def returnMat(self):
        gray = self.readImg(self.path)
        # fix: qu() takes the window as one 4-tuple, not four separate
        # arguments - the original call raised TypeError
        return self.qu(gray, (50, 58, 205, 213))
    def readGroundTruth(self, truthPath='../image/8068.mat'):
        """Merge the boundary maps of the first six annotators of a BSDS .mat
        file into one 0/255 image."""
        # path = './Mat/8068.mat'
        groundTrhth = truthPath
        matdata = scio.loadmat(groundTrhth)
        matgray = matdata['groundTruth'][0][0][0][0][1]
        for k in range(1, 6):
            for i in range(len(matgray)):
                for j in range(len(matgray[0])):
                    if (matgray[i][j] != 1):
                        matgray[i][j] += matdata['groundTruth'][0][k][0][0][1][i][j]
        matgray = matgray * 255
        return matgray
    def returnGroundTruthMat(self, truthPath='../image/8068.mat'):
        """Return the first annotator's boundary map scaled to 0/255."""
        groundTrhth = truthPath
        matdata = scio.loadmat(groundTrhth)
        matgray = matdata['groundTruth'][0][0][0][0][1] * 255  # change the second index 0 to pick another annotator
        return matgray
    def showAllGroundTruth(self, path):
        """Show every annotator's boundary map of the .mat file in its own window."""
        matdata = scio.loadmat(path)
        n = len(matdata['groundTruth'][0])
        mp = {}
        for i in range(n):
            mp[i] = matdata['groundTruth'][0][i][0][0][1] * 255
            cv2.imshow('truth' + str(i), mp[i])
    def test(self):
        """Demo: tile several reads of lena.jpg with numpy stacking and show them."""
        img1 = cv2.imread('lena.jpg', cv2.IMREAD_COLOR)
        img2 = cv2.imread('lena.jpg', cv2.IMREAD_GRAYSCALE)
        img3 = cv2.imread('lena.jpg', cv2.IMREAD_UNCHANGED)
        img4 = cv2.imread('lena.jpg')
        htitch = np.hstack((img1, img3, img4))
        vtitch = np.vstack((img1, img3))
        cv2.imshow("test1", htitch)
        cv2.imshow("test2", vtitch)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
# io = ImgOperator()
# io.showAllGroundTruth()
|
987,826 | 9320894622232324256716fd33a857e5ae92073a | from django.db import models
from mutualtracker.fundtracking.models import Fund
class Country(models.Model):
    # Lookup table of countries a holding company can be registered in.
    class Meta:
        verbose_name_plural = 'countries'
    name = models.CharField(max_length=255)
    def __unicode__(self):
        # Display name in admin/shell (Python 2 style __unicode__).
        return self.name
class Industry(models.Model):
    # Lookup table of industries a company can belong to.
    class Meta:
        verbose_name_plural = 'industries'
    name = models.CharField(max_length=255)
    def __unicode__(self):
        # Display name in admin/shell (Python 2 style __unicode__).
        return self.name
class Company(models.Model):
    # A company that can appear in a fund report's holdings list.
    class Meta:
        verbose_name_plural = 'companies'
    name = models.CharField(max_length=255)
    country = models.ForeignKey(Country)
    code = models.SmallIntegerField(null=True)  # optional numeric identifier
    ticker = models.CharField(max_length=15, null=True)  # optional exchange ticker
    industries = models.ManyToManyField(Industry)
    def __unicode__(self):
        return self.name
class Report(models.Model):
    # A fund's holdings report (annual or interim), backed by an uploaded file.
    REPORT_TYPE_CHOICES = (
        (0, 'Invalid'),
        (1, 'Annual'),
        (2, 'Interim'),
    )
    # Processing pipeline state of the uploaded report file.
    REPORT_STATE_CHOICES = (
        (0, 'Null'),
        (1, 'Downloaded'),
        (2, 'Parsed'),
    )
    def get_upload_path(self, filename):
        # Group uploaded report files by fund code.
        return 'Reports/{0}/{1}'.format(self.fund.code, filename)
    fund = models.ForeignKey(Fund)
    date = models.DateField()
    type = models.SmallIntegerField(choices=REPORT_TYPE_CHOICES)
    file_name = models.FileField(upload_to=get_upload_path, max_length=255)
    state = models.SmallIntegerField(choices=REPORT_STATE_CHOICES)
    holdings = models.ManyToManyField(Company, through='Holding')
    def __unicode__(self):
        return '{0} - {1}: {2}'.format(self.fund, self.date, self.file_name)
class Holding(models.Model):
    """Through model: one company position inside one report."""

    report = models.ForeignKey(Report)
    company = models.ForeignKey(Company)
    quantity = models.DecimalField(max_digits=10, decimal_places=2, null=True)
    cost = models.DecimalField(max_digits=10, decimal_places=2, null=True)
    value = models.DecimalField(max_digits=10, decimal_places=2)
    percentage_of_nav = models.DecimalField(max_digits=10, decimal_places=2)

    def __unicode__(self):
        return '{0} - {1}'.format(self.report, self.company)
|
987,827 | d658c069b4699fd23f7e2c83ab0e33a600a6fa89 | from tornado.web import Application
from day1.utils.dbutil import DBUtil
class MyAppliction(Application):
    # NOTE(review): class name looks like a typo for "MyApplication"; left
    # as-is because other modules may import it under this name.

    def __init__(self, handlers, tp, sp, um):
        """Tornado app wiring: tp=template_path, sp=static_path, um=ui_modules."""
        super().__init__(handlers, template_path=tp,
                         static_path=sp, ui_modules=um)
        # Shared DB helper attached to the application instance.
        self.dbutil = DBUtil()
987,828 | 713da99a464057c9a4e8ca3a29be887ecad0d66a | """
PROBLEM 45
Write a program to print the output of the following poblem statement :-
Initialize `fahrenheit` dictionary
fahrenheit = {'t1':-30, 't2':-20, 't3':-10, 't4':0}
1 Get the corresponding `celsius` values in list
2 Create the `celsius` dictionary
3 convert a dictionary of Fahrenheit temperatures into celsius?
"""
# Fahrenheit readings keyed by label (problem 45 input data).
fahrenheit = {'t1': -30, 't2': -20, 't3': -10, 't4': 0}
# Convert each reading with C = (F - 32) * 5/9, rounded to 2 decimals.
# Fix: dropped the original's no-op statement `list(fahrenheit.items())`.
dict_cel = {key: round((float(5) / 9) * (val - 32), 2)
            for key, val in fahrenheit.items()}
print("Desired output :", dict_cel)
987,829 | f8c4d4f6a950513ed8591d30f0fa0e6ac02c69f7 | # -*- coding: utf-8 -*-
# vim: set expandtab:ts=4
"""
/***************************************************************************
Timeseries base class
A QGIS plugin
Plugin for visualization and analysis of remote sensing time series
-------------------
begin : 2013-03-15
copyright : (C) 2013 by Chris Holden
email : ceholden@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import abc
import datetime as dt
import os
import numpy as np
from osgeo import gdal
import scipy.io
class AbstractTimeSeries(object):
    """ Abstract base class representing a remote sensing time series.

    AbstractTimeSeries class is meant to be sub-classed and its methods
    overriden. This interface simply defines attributes and methods expected
    by "TSTools" QGIS plugin.

    Required attributes:
        image_names             Names or IDs for each image
        filenames               File basename for each image
        filepaths               Full path to each image
        length                  Number of images in time series
        dates                   np.array of datetime for each image
        n_band                  number of bands per image
        x_size                  number of columns per image
        y_size                  number of rows per image
        geo_transform           geo-transform of images
        projection              projection of images
        px                      current pixel column
        py                      current pixel row
        has_results             boolean indicating existence of model fit

    Required methods:
        fetch_pixel             retrieve pixel data for given x/y
        fetch_result            retrieve result for given x/y
        get_data                return dataset
        get_prediction          return predicted dataset for x/y
        get_breaks              return break points for time segments

    Additional attributes:
        has_cache               boolean indicating existence of cached data
        can_cache               boolean indicating potential to cache data
        cache_folder            location of cache, if any
        mask_band               band (index on 0) of mask within images
        mask_val                values to mask
        __metadata__            list of attributes containing metadata

    Additional methods:
        apply_mask              apply mask to dataset
        retrieve_from_cache     retrieve dataset from cached retrieval
        write_to_cache          write retrieved dataset to cache
    """
    # Python 2 style ABC declaration (the class also uses xrange below).
    __metaclass__ = abc.ABCMeta

    # Overide/set these within subclasser as needed
    has_cache = False
    can_cache = False
    cache_folder = None
    mask_band = None
    mask_val = None
    __metadata__ = []
    __metadata__str__ = []

    def __init__(self, location, image_pattern, stack_pattern):
        # Basic, required information
        self.location = os.path.realpath(location)
        self.image_pattern = image_pattern
        self.stack_pattern = stack_pattern

    def __repr__(self):
        return 'A {c} time series of {n} images at {m}'.format(
            c=self.__class__.__name__, n=self.length, m=hex(id(self)))

    # ADDITIONAL METHODS: override/set by subclasser as needed
    def apply_mask(self, mask_band=None, mask_val=None):
        """ Use subclasser to set if capability is available """
        pass

    def retrieve_from_cache(self, x, y):
        """ Use subclasser to set if capability is available """
        return False

    def write_to_cache(self):
        """ Use subclasser to set if capability is available """
        return False

    # REQUIRED PROPERTIES
    @abc.abstractproperty
    def image_names(self):
        """ Common names or IDs for each image """
        pass

    @abc.abstractproperty
    def filenames(self):
        """ File basename for each image """
        pass

    @abc.abstractproperty
    def filepaths(self):
        """ Full path to each image """
        pass

    @abc.abstractproperty
    def length(self):
        """ Length of the time series """
        pass

    @abc.abstractproperty
    def dates(self):
        """ np.array of datetime for each image """
        pass

    @abc.abstractproperty
    def n_band(self):
        """ number of bands per image """
        pass

    @abc.abstractproperty
    def x_size(self):
        """ number of columns per image """
        pass

    @abc.abstractproperty
    def y_size(self):
        """ number of rows per image """
        pass

    @abc.abstractproperty
    def geo_transform(self):
        """ geo-transform for each image """
        pass

    @abc.abstractproperty
    def projection(self):
        """ projection for each image """
        pass

    @abc.abstractproperty
    def has_results(self):
        """ boolean indicating existence of model fit """
        pass

    # HELPER METHOD
    def get_ts_pixel(self, x, y):
        """ Fetch pixel data for a given x/y and set to self.data

        Args:
            x           column
            y           row
        """
        # NOTE: xrange is Python 2 only; the subclass's retrieve_pixel is
        # called once per image in the series.
        for i in xrange(self.length):
            self.retrieve_pixel(x, y, i)

    # REQUIRED METHODS
    @abc.abstractmethod
    def retrieve_pixel(self, x, y, index):
        """ Return pixel data for a given x/y and index in time series

        Args:
            x           column
            y           row
            index       index of image in time series

        Returns:
            data        n_band x 1 np.array
        """
        pass

    @abc.abstractmethod
    def retrieve_result(self, x, y):
        """ Retrieve algorithm result for a given x/y

        Args:
            x           column
            y           row
        """
        pass

    @abc.abstractmethod
    def get_data(self, mask=True):
        """ Return the (optionally masked) dataset. """
        pass

    @abc.abstractmethod
    def get_prediction(self, band):
        """ Return the model prediction for one band. """
        pass

    @abc.abstractmethod
    def get_breaks(self, x, y):
        """ Return break points of the fitted time segments for x/y. """
        pass

    @abc.abstractmethod
    def get_px(self):
        """ current pixel column number """
        pass

    @abc.abstractmethod
    def set_px(self, value):
        """ set current pixel column number """
        pass

    @abc.abstractmethod
    def get_py(self):
        """ current pixel row number """
        pass

    @abc.abstractmethod
    def set_py(self, value):
        """ set current pixel row number """
        pass

    # NOTE(review): abstractproperty is given (getter, setter) property()-style
    # arguments here -- confirm this behaves as intended on the targeted
    # Python 2 version.
    _px = abc.abstractproperty(get_px, set_px)
    _py = abc.abstractproperty(get_py, set_py)
# Utility reader class
class ImageReader(object):
    """
    This class defines the methods for reading pixel values from a raster
    dataset. I've coded this up because certain file formats are more
    efficiently accessed via fopen than via GDAL (i.e. BIP).

    http://osdir.com/ml/gdal-development-gis-osgeo/2007-04/msg00345.html

    If the fformat isn't a BIP, then we just use GDAL. In the future we can
    probably code it better for BIL and maybe BSQ.

    Args:
        filename        filename of the raster to read from
        fformat         file format of the raster
        dt              numpy datatype
        size            list of [nrow, ncol]
        n_band          number of bands in image
    """

    def __init__(self, filename, fformat, dt, size, n_band):
        self.filename = filename
        self.fformat = fformat
        self.dt = dt
        self.size = size
        self.n_band = n_band
        # Switch the actual definition of get_pixel by fformat
        # TODO: reimplement this using class inheritance
        # https://www.youtube.com/watch?v=miGolgp9xq8
        if fformat == 'BIP':
            self.get_pixel = self.__BIP_get_pixel
        else:
            self.get_pixel = self.__band_get_pixel

    def __BIP_get_pixel(self, row, col):
        # BIP layout: all bands of one pixel are contiguous on disk, so one
        # seek + one read fetches the full spectrum.
        if row < 0 or row >= self.size[0] or col < 0 or col >= self.size[1]:
            raise ValueError('Cannot select row,col %s,%s' % (row, col))
        with open(self.filename, 'rb') as f:
            # Skip to location of data in file
            f.seek(self.dt.itemsize * (row * self.size[1] + col) *
                   self.n_band)
            # Read in
            dat = np.fromfile(f, dtype=self.dt, count=self.n_band)
        return dat

    def __band_get_pixel(self, row, col):
        # Generic GDAL path: one 1x1 ReadAsArray call per band.
        if row < 0 or row >= self.size[0] or col < 0 or col >= self.size[1]:
            raise ValueError('Cannot select row,col %s,%s' % (row, col))
        ds = gdal.Open(self.filename, gdal.GA_ReadOnly)
        pixels = np.zeros(self.n_band)
        for i in range(ds.RasterCount):
            b = ds.GetRasterBand(i + 1)
            pixels[i] = b.ReadAsArray(col, row, 1, 1)
        ds = None  # release the GDAL dataset handle
        return pixels
# Utility functions
def mat2dict(matlabobj):
    """Recursively convert a scipy.io mat_struct into a plain dict.

    Fields listed in ``_fieldnames`` are copied; nested mat_structs are
    converted recursively, every other value is taken as-is.
    """
    converted = {}
    for name in matlabobj._fieldnames:
        entry = matlabobj.__dict__[name]
        is_struct = isinstance(entry, scipy.io.matlab.mio5_params.mat_struct)
        converted[name] = mat2dict(entry) if is_struct else entry
    return converted
def ml2pydate(ml_date):
    """Convert a MATLAB datenum into a Python datetime.

    MATLAB day numbers start 366 days before Python's proleptic ordinal 1,
    so the offset is subtracted after the ordinal conversion.
    """
    ordinal = int(ml_date)
    return dt.datetime.fromordinal(ordinal) - dt.timedelta(days=366)
def py2mldate(py_date):
    """Convert a Python datetime into a MATLAB datenum (integer days)."""
    shifted = py_date + dt.timedelta(days=366)
    return shifted.toordinal()
|
987,830 | 6b25b81f4752e1df62a50aefc590449c5658e984 | from torch import nn
from torch.nn import functional as F
# https://fleuret.org/ee559/src/dlc_practical_4_solution.py as inspiration for this model
class SimpleConvolutionalNeuralNetwork(nn.Module):
    """Two conv blocks + two fully connected layers for 2x14x14 inputs.

    Based on https://fleuret.org/ee559/src/dlc_practical_4_solution.py.
    Produces 2 logits per sample.
    """

    def __init__(self, hidden_layers):
        super(SimpleConvolutionalNeuralNetwork, self).__init__()
        # Block 1: 2 -> 32 channels. Conv (k5, pad 2) keeps 14x14;
        # maxpool (k3, s3) reduces 14 -> floor((14-3)/3)+1 = 4.
        self.conv_1 = nn.Sequential(
            nn.Conv2d(2, 32, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=3))
        # Block 2: 32 -> 64 channels. Conv keeps 4x4; maxpool (k2, s2)
        # reduces 4 -> 2, giving 64 * 2 * 2 = 256 features.
        self.conv_2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        # Classifier head: 256 flattened features -> hidden -> 2 classes.
        self.fc_1 = nn.Linear(2 * 2 * 64, hidden_layers)
        self.fc_2 = nn.Linear(hidden_layers, 2)

    def forward(self, x):
        """Forward pass; x is (batch, 2, 14, 14), returns (batch, 2) logits."""
        out = self.conv_1(x)
        out = self.conv_2(out)
        # Flatten once. (The original reshaped to (batch, -1) and then
        # view-ed to (-1, 256) again -- a redundant double flatten.)
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc_1(out))
        out = self.fc_2(out)
        return out
|
987,831 | b2704e6625efd503d9dfe94d33da2668699924fa | """This module marks the folder as a python package and do some import."""
from .file_globbing import expan_globbing_pattern
from .parameter_expansion import expan_parameter
from .tilde_expansion import expan_tilde
from .parameter_assignment import assign_paramenter
|
987,832 | 28c1a53884380c7ed82eb2db031595c768f30917 | from pathlib import Path
import asyncio
from kallikrein import k, Expectation, kf
from kallikrein.matchers.maybe import be_just
from amino.test import temp_dir
from amino import List, Just, _, Map
from ribosome.machine.messages import Nop, Stage1
from ribosome.machine.state import AutoRootMachine
from ribosome.nvim import NvimFacade
from ribosome.settings import Config
from ribosome.test.integration.klk import later
from proteome.project import Project, Projects, ProjectAnalyzer
from proteome.components.core import Next, Prev, RemoveByIdent, AddByParams
from proteome.components.history.data import History
from proteome.components.core.message import Create
from proteome import mk_config
from proteome.env import Env
from proteome.components.core.main import Core
from unit._support.loader import LoaderSpec
from unit._support.async import test_loop
null = Path('/dev/null')
class ProteomeSpec(LoaderSpec):
    '''transition unit tests

    create a project $create
    cycle through projects $cycle
    create ctags for two projects $ctags
    '''

    def setup(self):
        LoaderSpec.setup(self)
        # Needed so asyncio can watch child processes started by the specs.
        asyncio.get_child_watcher()

    def _prot(self, p=Map(), b=List(), t=Map(), pros=List()):
        # Build a transient root machine whose Env is pre-seeded with `pros`.
        initial = Projects(projects=pros)
        def ctor(config: Config, vim: NvimFacade) -> Env:
            return Env(projects=initial, config=config, vim_facade=Just(vim))
        return AutoRootMachine(self.vim, mk_config(state_ctor=ctor, components=p), 'proteome').transient()

    def create(self) -> Expectation:
        """Sending Create adds a project with the given name and root."""
        name = 'proj'
        with self._prot() as prot:
            data = prot.send_sync(Create(name, null))
            p = data.projects.projects[0]
            return (k(p.name) == name) & (k(p.root) == null)

    def cycle(self) -> Expectation:
        """Next/Prev rotate the `current` project through the list."""
        self.vim_mock.should_receive('switch_root').and_return(None)
        name = 'proj'
        name2 = 'proj2'
        pros = List(Project.of(name, null), Project.of(name2, null))
        with self._prot(pros=pros) as prot:
            return (
                k(prot.data.current).must(be_just(pros[0])) &
                k(prot.send_sync(Next()).current).must(be_just(pros[1])) &
                k(prot.send_sync(Prev()).current).must(be_just(pros[0]))
            )

    def ctags(self) -> Expectation:
        """gen_all eventually produces a tag file for every project."""
        plug_name = 'proteome.components.ctags'
        p1 = self.mk_project('pro1', 'c')
        p2 = self.mk_project('pro2', 'go')
        pros = List(p1, p2)
        with self._prot(List(plug_name), pros=pros) as prot:
            with test_loop() as loop:
                plug = prot.plugin('ctags')._get
                p1.tag_file.exists().should_not.be.ok
                p2.tag_file.exists().should_not.be.ok
                prot.plug_command('ctags', 'gen_all', List())
                def check(p):
                    # `later` polls until the async ctags run has finished.
                    plug.ctags.await_threadsafe(loop)
                    p.tag_file.exists().should.be.ok
                later(check, p1)
                later(check, p2)
                plug.ctags.ready.should.be.ok

    class history_(object):
        # Nested spec group exercising the history (git-based) component.

        def setup(self):
            self.vim.vars.set_p('all_projects_history', 1)
            self.vim.vars.set('proteome_history_base', str(self.history_base))
            self.plug_name = 'proteome.components.history'
            self.main_project = self.mk_project('pro1', 'c')
            self.test_file_1 = self.main_project.root / 'test_file_1'
            self.test_content = List(
                'content_1',
                'content_2',
                'content_3',
            )

        def _three_commits(self, prot, loop):
            # Write each content revision and commit it, waiting in between.
            plug = prot.plugin('history').x
            for cont in self.test_content:
                self.test_file_1.write_text(cont)
                prot.plug_command('history', 'Commit', List())
                self._await(plug.executor, loop)

        def _await(self, executor, loop):
            # Poll until the history executor is idle again.
            self._wait(0.1)
            while not executor.ready:
                self._wait(0.1)
            executor.await_threadsafe(loop)
            self._wait(0.1)

        def init(self):
            """Stage4 initialises a history repo (HEAD) per project."""
            def check_head(p):
                (self.history_base / p.fqn / 'HEAD').exists().should.be.ok
            p1 = self.main_project
            p2 = self.mk_project('pro2', 'go')
            pros = List(p1, p2)
            with self._prot(List(self.plug_name), pros=pros) as prot:
                prot.plug_command('history', 'Stage4', List())
                later(lambda: check_head(p1))
                check_head(p2)

        def commit(self):
            """A Commit lands as the repo head for the main project."""
            p1 = self.main_project
            p2 = self.mk_project('pro2', 'go')
            pros = List(p1, p2)
            hist = History(self.history_base)
            with self._prot(List(self.plug_name), pros=pros) as prot:
                with test_loop() as loop:
                    prot.plug_command('history', 'Stage4', List())
                    plug = prot.plugin('history').x
                    self.test_file_1.write_text('test')
                    prot.plug_command('history', 'Commit', List())
                    self._await(plug.executor, loop)
                    (hist.repo(p1) / _.history // _.head / repr).should.just

        def prev_next(self):
            """HistoryPrev/HistoryNext move the file content between commits."""
            p1 = self.main_project
            p2 = self.mk_project('pro2', 'go')
            pros = List(p1, p2)
            with self._prot(List(self.plug_name), pros=pros) as prot:
                with test_loop() as loop:
                    prot.plug_command('history', 'Stage4', List())
                    self._three_commits(prot, loop)
                    prot.plug_command('history', 'HistoryLog', List())
                    prot.plug_command('history', 'HistoryPrev', List())
                    prot.plug_command('history', 'HistoryLog', List())
                    later(lambda: self.test_file_1.read_text()
                          .should.equal(self.test_content[1]))
                    prot.plug_command('history', 'HistoryNext', List())
                    later(lambda: self.test_file_1.read_text()
                          .should.equal(self.test_content[2]))

    def current_project(self):
        """Stage1 detects the analyzed main dir as the current project."""
        p = self.pypro1_root
        # NOTE(review): flexmock is not among the visible imports -- confirm
        # it is provided by the spec base modules.
        flexmock(ProjectAnalyzer)\
            .should_receive('main_dir')\
            .and_return(Just(p))
        ctx = self._prot(b=List(self.project_base), t=self.type_bases)
        target = Project.of(self.pypro1_name, p, Just(self.pypro1_type))
        with ctx as prot:
            prot.send_sync(Stage1())
            prot.send_sync(Nop())\
                .projects.projects.head\
                .should.equal(Just(target))

    def add_remove_project(self):
        """AddByParams then RemoveByIdent round-trips to an empty list."""
        ctx = self._prot(List(), List(self.project_base), self.type_bases)
        with ctx as prot:
            prot.send_sync(AddByParams(self.pypro1_name, Map()))\
                .project(self.pypro1_name)\
                .map(_.root)\
                .should.contain(self.pypro1_root)
            prot.send_sync(RemoveByIdent(self.pypro1_name))\
                .all_projects.should.be.empty

    def add_by_params(self):
        """AddByParams honours explicit type/root parameters."""
        tpe = 'ptype'
        name = 'pname'
        root = temp_dir('proteome', 'add_by_params')
        params = Map(
            type=tpe,
            root=root
        )
        with self._prot() as prot:
            ret = prot.send_sync(AddByParams(name, params))
            (ret.project(name) / _.root).should.contain(root)
__all__ = ('ProteomeSpec',)
|
987,833 | e6e09a1f9a4143d876f6dc475d02fa770496c1b8 | def urlsafe_b64encode(bs: bytes) -> str:
return encode(bs, True).decode('utf-8')
def urlsafe_b64decode(s: str) -> bytes:
    """Decode URL-safe base64 text (padding optional) into raw bytes."""
    decoded = base64url_decode(s)
    return decoded
_to_base64 = [
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/'
]
_to_base64_url = [
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-', '_'
]
def _from_base64(base64: list) -> list:
result = [-1] * 256
for i, v in enumerate(base64):
result[ord(v)] = i
result[ord('=')] = -2
return result
def encode(s, is_url: bool = False) -> bytes:
    """Base64-encode *s* (str or bytes) and return the encoded bytes.

    Mirrors java.util.Base64: full 3-byte groups become 4 alphabet chars;
    a 1- or 2-byte tail is emitted with '=' padding. ``is_url`` selects the
    URL-safe alphabet ('-' and '_' instead of '+' and '/').

    Raises:
        TypeError: if *s* is neither str nor bytes.
    """
    base64 = _to_base64_url if is_url else _to_base64
    if isinstance(s, bytes):
        src = s
    elif isinstance(s, str):
        src = bytes(s, 'utf-8')
    else:
        # Fix: the original fell through here and crashed later with an
        # unhelpful NameError on `src`.
        raise TypeError('encode() expects str or bytes, got %s' % type(s).__name__)
    dst = bytearray(_encode_length(len(src)))
    start = 0
    end = len(src)
    slen = int(end / 3) * 3  # input bytes covered by complete 3-byte groups
    dp = 0
    sl0 = slen
    while start < slen:
        sp0 = start
        dp0 = dp
        while sp0 < sl0:
            # Pack 3 input bytes into 24 bits, emit four 6-bit characters.
            bits = (src[sp0] & 0xff) << 16 | (src[sp0+1] & 0xff) << 8 | (src[sp0+2] & 0xff)
            sp0 = sp0 + 3
            dst[dp0] = ord(base64[(bits >> 18) & 0x3f])
            dst[dp0+1] = ord(base64[(bits >> 12) & 0x3f])
            dst[dp0+2] = ord(base64[(bits >> 6) & 0x3f])
            dst[dp0+3] = ord(base64[bits & 0x3f])
            dp0 = dp0 + 4
        dlen = int((sl0 - start) / 3) * 4
        dp += dlen
        start = sl0
    if start < end:
        # 1- or 2-byte tail: emit 2 or 3 characters plus '=' padding.
        b0 = src[start] & 0xff
        start = start + 1
        dst[dp] = ord(base64[b0 >> 2])
        dp = dp + 1
        if start == end:
            dst[dp] = ord(base64[b0 << 4 & 0x3f])
            dst[dp+1] = ord('=')
            dst[dp+2] = ord('=')
        else:
            b1 = src[start] & 0xff
            dst[dp] = ord(base64[b0 << 4 & 0x3f | (b1 >> 4)])
            dst[dp+1] = ord(base64[b1 << 2 & 0x3f])
            dst[dp+2] = ord('=')
    return bytes(dst)
def decode(src, is_url: bool = False) -> bytes:
    """Decode base64 *src* (str or bytes) into the original raw bytes.

    ``is_url`` selects the URL-safe alphabet. Raises on illegal characters,
    misplaced '=' padding, or trailing bytes after the padding.
    """
    if isinstance(src, str):
        src = bytes(src, 'utf-8')
    dst = bytearray(_decode_length(src))
    # Perf fix: build the 256-entry reverse-lookup table once. The original
    # re-selected the alphabet and rebuilt the whole table for every single
    # input byte inside the loop.
    base64 = _to_base64_url if is_url else _to_base64
    lookup = _from_base64(base64)
    sp = 0
    sl = len(src)
    dp = 0
    bits = 0
    shiftto = 18  # bit position for the next 6-bit group within `bits`
    while sp < sl:
        b = src[sp] & 0xff
        sp = sp + 1
        b = lookup[b]
        if b < 0:
            if b == -2:
                # '=' is only legal as "==" closing a 2-char unit or as a
                # single '=' closing a 3-char unit.
                if shiftto == 6 and (sp == sl or src[sp] != ord('=')) or shiftto == 18:
                    raise Exception('Input byte array has wrong 4-byte ending unit')
                sp = sp + 1
                break
            raise Exception(f'Illegal base64 character {chr(src[sp - 1])}')
        bits = bits | (b << shiftto)
        shiftto -= 6
        if shiftto < 0:
            # Completed a 4-char unit -> 3 output bytes.
            dst[dp] = (bits >> 16 & 0xff)
            dst[dp+1] = (bits >> 8 & 0xff)
            dst[dp+2] = bits & 0xff
            dp = dp + 3
            shiftto = 18
            bits = 0
    # Flush a partial final unit (2 or 3 significant characters).
    if shiftto == 6:
        dst[dp] = (bits >> 16 & 0xff)
    elif shiftto == 0:
        dst[dp] = (bits >> 16 & 0xff)
        dst[dp+1] = (bits >> 8 & 0xff)
    elif shiftto == 12:
        raise Exception('Last unit does not have enough valid bits')
    if sp < sl:
        raise Exception(f'Input byte array has incorrect ending byte at {sp}')
    return bytes(dst)
def urlsafe_encode(s: str) -> bytes:
    """Encode *s* using the URL-safe alphabet; returns bytes."""
    return encode(s, is_url=True)
def urlsafe_decode(s) -> bytes:
    """Decode *s* using the URL-safe alphabet; returns bytes."""
    return decode(s, is_url=True)
def _encode_length(l: int) -> int:
return 4 * int((l + 2) / 3)
def _decode_length(s: bytes) -> int:
paddings = 0
start = 0
sl = len(s)
l = sl - start
if l == 0:
return 0
if l < 2:
raise Exception('Input byte[] should at least have 2 bytes for base64 bytes')
if s[sl - 1] == ord('='):
paddings = paddings + 1
if s[sl - 2] == ord('='):
paddings = paddings + 1
if paddings == 0 and l & 0x3 != 0:
paddings = 4 - (l & 0x3)
return 3 * (int((l + 3) / 4)) - paddings
def base64url_decode(s) -> bytes:
    """Decode URL-safe base64 (JWT style), re-adding stripped '=' padding."""
    raw = s.encode('ascii') if isinstance(s, str) else s
    missing = -len(raw) % 4
    if missing:
        raw += b'=' * missing
    return urlsafe_decode(raw)
|
987,834 | 2629e6a2e9f136fe61b55d1550c4941977f32a86 | from flask import Flask
# 导入Flask模块
app = Flask(__name__)
# 创建Flask对象并且以当前模块的名称作为参数
@app.route('/hello/<name>')
# route() is a decorator: app.route(rule, options) binds a URL rule to the
# view function below. `rule` is the URL to bind; `options` are forwarded to
# the underlying Rule object.
# @app.route('/hello') is equivalent to app.add_url_rule('/', 'hello', hello_world).
def hello_name(name):
    # <name> captured from the URL is passed in as a keyword argument.
    return "Hello %s!" % name
# A URL rule may contain variable parts marked as <variable-name>; each value
# is passed to the bound view function as a keyword argument. A converter
# prefix such as <int:...> restricts and converts the captured value.
@app.route('/blog/<int:postID>')
def show_blog(postID):
    return 'Blog Number %d' % postID
@app.route('/rev/<float:revNo>')
def revision(revNo):
    # <float:...> converter: matches values like /rev/1.1
    return 'Revision Number %f' % revNo
if __name__ == '__main__':
    # app.run(host, port, debug, options) starts the local development server:
    #   host    - hostname to listen on; defaults to 127.0.0.1 (use '0.0.0.0'
    #             to make the server externally reachable)
    #   port    - listening port, defaults to 5000
    #   debug   - defaults to False; True enables debug output
    #   options - forwarded to the underlying Werkzeug server
    app.run()
987,835 | 7aa683eac4d728295f8a720f96f2ca62c1cf0f2d | import ft_retriver.views as ft_retriver_view
"""retriver URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# URL table: Django admin plus the ft_retriver demo/processing endpoints.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('hello/', ft_retriver_view.hello),
    path('uploadfile/', ft_retriver_view.upload_file),
    path('xml/', ft_retriver_view.xml_deal),
    path('json/', ft_retriver_view.json_deal),
    path('text_distribution/', ft_retriver_view.text_distribution),
    path('tf_idf/', ft_retriver_view.tf_idf),
    path('tf_idf_automatic/', ft_retriver_view.tf_idf_auto)
]
|
987,836 | 40af1bd88b639325af79b05380ed598d3b64510e | """
const.py:
Store class to help deal with constant variable
"""
class Template():
    """
    Store template name

    Central registry of the template paths rendered by the roomalloc views.
    """
    PUBLIC_INDEX = "roomalloc/public/index.html"
    PUBLIC_ABOUT = "roomalloc/public/about.html"
    PUBLIC_CONTACT = "roomalloc/public/contact.html"
    PUBLIC_FD_CONF = "roomalloc/public/fd_confirm.html"

    ACC_LOGIN = "roomalloc/public/login.html"
    ACC_SIGNUP = "roomalloc/public/signup.html"
    ACC_PROFILE = "roomalloc/user/user_profile.html"

    USER_HOME = "roomalloc/user/user_home.html"

    RES_LIST = "roomalloc/user/reserve/res_list.html"
    RES_DETAIL = "roomalloc/user/reserve/res_detail.html"

    ROOM_EXPLORE = "roomalloc/user/room/room_explore.html"
    ROOM_DETAIL = "roomalloc/user/room/room_detail.html"
    ROOM_RESERVE = "roomalloc/user/room/room_reserve.html"
    ROOM_CONFIRM = "roomalloc/user/room/room_confirm.html"
class TplConst():
    """
    Const that appear in django template
    """
    # Context key "nbar" -- presumably marks the active navbar entry; confirm
    # against the templates.
    NBAR = "nbar"
class GroupName():
    """Names of the Django auth groups referenced by this app."""
    NORMAL = 'normal'
    STAFF = 'staff'
|
987,837 | af97a07a157a26774fc34860a9a6a3577719d04b | #! /usr/bin/env python
"""
Say you have an array for which the ith element is the price of a given stock on day i.
If you were only permitted to complete at most one transaction (ie, buy one and sell one share of the stock), design an algorithm to find the maximum profit.
"""
class Solution:
    """Single-transaction max stock profit (LeetCode 121)."""

    def maxProfit(self, prices):
        """Return the best profit from one buy followed by one later sell.

        :type prices: List[int]
        :rtype: int

        Single pass: track the cheapest price seen so far and the best
        spread against it. Returns 0 when no profitable trade exists.
        """
        # Fix: the original indexed prices[0] unconditionally and crashed
        # with IndexError on an empty list.
        if not prices:
            return 0
        maxdiff = 0
        minprice = prices[0]
        for price in prices[1:]:
            if price > minprice:
                if (price - minprice) > maxdiff:
                    maxdiff = price - minprice
            else:
                minprice = price
        return maxdiff
if __name__ == "__main__":
s = Solution()
print(s.maxProfit([7, 1, 5, 3, 6, 4]))
print(s.maxProfit([7, 6, 4, 3, 1])) |
987,838 | fe7492cacea4b2b2564987b7cb57d9f0524bd008 | from django.shortcuts import render, redirect
from django.http.response import HttpResponseRedirect
from .models import JadwalBelajarBareng
from .forms import JadwalForm
from django.core import serializers
from django.http.response import HttpResponse
from django.contrib.auth.decorators import login_required
def jadwal(request):
    """List every study-session (JadwalBelajarBareng) entry."""
    jadwalb = JadwalBelajarBareng.objects.all()
    context = { 'jadwalb' : jadwalb }
    return render(request, 'jadwal_belajar_bareng.html', context)
@login_required(login_url = '/login')
def add_jadwal(request):
    """Show the creation form; on valid POST, save and return to the list."""
    form = JadwalForm()
    if request.method == "POST":
        form = JadwalForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/jadwal-belajar-bareng')
    context = {'form' : form }
    return render(request, 'add_jadwal.html', context)
@login_required(login_url = '/login')
def remove_jadwal(request, id):
    """Confirm (GET) and perform (POST) deletion of the entry with `id`."""
    jdwl = JadwalBelajarBareng.objects.get(id = id)
    if request.method == "POST":
        jdwl.delete()
        return redirect('/jadwal-belajar-bareng')
    context = { 'sched' : jdwl }
    return render(request, 'remove_jadwal.html', context)
@login_required(login_url = '/login')
def edit_jadwal(request, id):
    """Edit entry `id`; POST overwrites all fields from the submitted form."""
    jdwl = JadwalBelajarBareng.objects.get(id = id)
    if request.method == "POST":
        Prioritas = request.POST.get("Prioritas")
        Matkul = request.POST.get("Matkul")
        Waktu = request.POST.get("Waktu")
        Topik = request.POST.get("Topik")
        Informasi = request.POST.get("Informasi")
        Link = request.POST.get("Link")
        # Bulk queryset update; `simpan` (the affected-row count) is unused.
        simpan = JadwalBelajarBareng.objects.filter(id = id).update(Prioritas = Prioritas, Matkul = Matkul, Waktu = Waktu, Topik = Topik, Informasi = Informasi, Link = Link)
        return redirect('/jadwal-belajar-bareng')
    context = { 'sched' : jdwl }
    return render(request, 'edit_jadwal.html', context)
def xml(request):
    """Dump every entry as XML via the Django serializer."""
    data = serializers.serialize('xml', JadwalBelajarBareng.objects.all())
    return HttpResponse(data, content_type="application/xml")
def json(request):
    """Dump every entry as JSON. NOTE: the view name shadows the builtin
    `json` module name within this module."""
    data = serializers.serialize('json', JadwalBelajarBareng.objects.all())
    return HttpResponse(data, content_type="application/json")
def prioritas_tinggi(request):
    """High-priority page; the queryset is unfiltered -- the template
    presumably does the priority filtering (TODO confirm)."""
    jadwalb = JadwalBelajarBareng.objects.all()
    context = { 'jadwalb' : jadwalb }
    return render(request, 'prioritas_tinggi.html', context)
def prioritas_sedang(request):
    """Medium-priority page; unfiltered queryset -- template presumably
    filters (TODO confirm)."""
    jadwalb = JadwalBelajarBareng.objects.all()
    context = { 'jadwalb' : jadwalb }
    return render(request, 'prioritas_sedang.html', context)
def prioritas_rendah(request):
    """Low-priority page; unfiltered queryset -- template presumably
    filters (TODO confirm)."""
    jadwalb = JadwalBelajarBareng.objects.all()
    context = { 'jadwalb' : jadwalb }
    return render(request, 'prioritas_rendah.html', context)
def prioritas_all(request):
    """All-priorities page rendering every entry."""
    jadwalb = JadwalBelajarBareng.objects.all()
    context = { 'jadwalb' : jadwalb }
    return render(request, 'prioritas_all.html', context)
|
987,839 | 2e0577cfbf1feb5239ae3c6119a48c3d81a8d93b | import sys
from diot import Diot
from bioprocs.utils import shell2 as shell
infile = {{i.infile | quote}}
hfile = {{i.hfile | quote}}
samfile = {{i.samfile | ?!:args.params.get('s', args.params.get('samples')) | repr}}
outfile = {{o.outfile | quote}}
bcftools = {{args.bcftools | quote}}
params = {{args.params | repr}}
nthread = {{args.nthread | repr}}
shell.load_config(bcftools = bcftools)
params._ = infile
params.o = outfile
if hfile:
params.h = hfile
if samfile:
if path.isfile(samfile):
params.S = samfile
else:
params.s = samfile
cmd = shell.bcftools.reheader(**params).cmd
sys.stderr.write("\n%s RUNNING %s\n%s\n%s\n\n" % ("-" * 40, "-" * 40, cmd, "-" * 89))
|
987,840 | 50a406ec8d784bd094dd1a0472c1486ccda27379 | from __future__ import annotations
import logging.config
import os
from collections.abc import Sequence
from pathlib import Path
import dask.config
import xarray as xr
from dask import compute
from dask.diagnostics import ProgressBar
from xclim.core import calendar
from miranda.gis import subset_domain
from miranda.io.utils import delayed_write, get_chunks_on_disk
from miranda.scripting import LOGGING_CONFIG
from miranda.utils import chunk_iterables
from ._aggregation import aggregate as aggregate_func
from ._data_corrections import dataset_corrections
from ._data_definitions import project_institutes, xarray_frequencies_to_cmip6like
logging.config.dictConfig(LOGGING_CONFIG)
dask.config.set(local_directory=f"{Path(__file__).parent}/dask_workers/")
__all__ = ["reanalysis_processing"]
# Needed pre-processing function
def _drop_those_time_bnds(dataset: xr.Dataset) -> xr.Dataset:
if "time_bnds" in dataset.variables:
return dataset.drop_vars(["time_bnds"])
return dataset
def reanalysis_processing(
    data: dict[str, list[str | os.PathLike]],
    output_folder: str | os.PathLike,
    variables: Sequence[str],
    aggregate: str | bool = False,
    domains: str | list[str] = "_DEFAULT",
    start: str | None = None,
    end: str | None = None,
    target_chunks: dict | None = None,
    output_format: str = "netcdf",
    overwrite: bool = False,
    engine: str = "h5netcdf",
    n_workers: int = 4,
    **dask_kwargs,
) -> None:
    """Convert raw reanalysis files into corrected, per-period output files.

    For each domain / project / variable: open the matching files, optionally
    subset in space and time, apply project corrections, optionally aggregate
    to daily frequency, then write one file per year (daily data) or per
    month (sub-daily data), in chunked batches of dask-delayed writes.

    Parameters
    ----------
    data: dict[str, list[str]]
        Mapping of project name to its input file paths.
    output_folder: str or os.PathLike
    variables: Sequence[str]
    aggregate: {"day", None}
    domains: {"QC", "CAN", "AMNO", "NAM", "GLOBAL"}
    start: str, optional
    end: str, optional
    target_chunks: dict, optional
    output_format: {"netcdf", "zarr"}
    overwrite: bool
    engine: {"netcdf4", "h5netcdf"}
    n_workers: int

    Returns
    -------
    None
    """
    if output_format == "netcdf":
        suffix = ".nc"
    elif output_format == "zarr":
        suffix = ".zarr"
    else:
        raise NotImplementedError(f"`output_format`: '{output_format}")
    with ProgressBar(), dask.config.set(
        **{"array.slicing.split_large_chunks": False},
        n_workers=n_workers,
        **dask_kwargs,
    ):
        out_files = Path(output_folder)
        if isinstance(domains, str):
            domains = [domains]
        for domain in domains:
            if domain == "_DEFAULT":
                logging.warning("No domain specified. proceeding with 'not-specified'.")
                output_folder = output_folder
                domain = "not-specified"
            elif isinstance(domain, str):
                # Each named domain gets its own output subfolder.
                output_folder = out_files.joinpath(domain)  # noqa
            else:
                raise NotImplementedError()
            output_folder.mkdir(exist_ok=True)
            for project, in_files in data.items():
                logging.info(
                    f"Processing {project} data{f' for domain {domain}' if domain !='not_specified' else ''}."
                )
                for var in variables:
                    # Select only for variable of interest
                    multi_files = sorted(x for x in in_files if f"{var}_" in str(x))
                    if multi_files:
                        # Derive the on-disk chunking from the first file.
                        all_chunks = get_chunks_on_disk(multi_files[0])
                        chunks = all_chunks[var]
                        if target_chunks is None:
                            # Reuse the found chunking, renaming dims to
                            # lon/lat CF-style names where needed.
                            output_chunks = dict()
                            mappings = dict(longitude="lon", latitude="lat")
                            for k, v in chunks.items():
                                if k in mappings.keys():
                                    output_chunks[mappings[k]] = v
                                else:
                                    output_chunks[k] = v
                            logging.warning(
                                "No `target_chunks` set. "
                                f"Proceeding with following found chunks: {output_chunks}."
                            )
                        else:
                            output_chunks = target_chunks
                        logging.info(f"Resampling variable `{var}`.")
                        if aggregate:
                            time_freq = aggregate
                        else:
                            # Infer the native frequency and express it as a
                            # CMIP6-like label (e.g. "1hr", "day").
                            parse_freq = calendar.parse_offset(
                                xr.infer_freq(xr.open_dataset(multi_files[0]).time)
                            )
                            time_freq = f"{parse_freq[0]}{xarray_frequencies_to_cmip6like[parse_freq[1]]}"
                        institute = project_institutes[project]
                        file_name = "_".join([var, time_freq, institute, project])
                        if domain != "not-specified":
                            file_name = f"{file_name}_{domain}"
                        xr_kwargs = dict(
                            chunks=chunks,
                            engine=engine,
                            preprocess=_drop_those_time_bnds,
                            parallel=True,
                        )
                        # Subsetting operations
                        if domain.lower() in ["global", "not-specified"]:
                            if start or end:
                                ds = xr.open_mfdataset(multi_files, **xr_kwargs).sel(
                                    time=slice(start, end)
                                )
                            else:
                                ds = xr.open_mfdataset(multi_files, **xr_kwargs)
                        else:
                            ds = subset_domain(
                                xr.open_mfdataset(multi_files, **xr_kwargs),
                                domain,
                                start_date=start,
                                end_date=end,
                            )
                        ds.attrs.update(dict(frequency=time_freq, domain=domain))
                        ds = dataset_corrections(ds, project=project)
                        if time_freq.lower() == "day":
                            # Daily aggregation may yield several derived
                            # variables; write yearly files.
                            dataset = aggregate_func(ds, freq="day")
                            freq = "YS"
                        else:
                            # Sub-daily data: keep as-is, write monthly files.
                            out_variable = (
                                list(ds.data_vars)[0]
                                if len(list(ds.data_vars)) == 1
                                else None
                            )
                            dataset = {out_variable: ds}
                            freq = "MS"
                        if len(dataset) == 0:
                            logging.warning(
                                f"Daily aggregation methods for variable `{var}` are not supported. "
                                "Continuing..."
                            )
                        for key in dataset.keys():
                            ds = dataset[key]
                            # TODO: What do we do about multivariable files. Are they even allowed?
                            out_variable = (
                                list(ds.data_vars)[0]
                                if len(list(ds.data_vars)) == 1
                                else None
                            )
                            file_name1 = file_name.replace(
                                f"{var}_", f"{out_variable}_"
                            )
                            logging.info(f"Writing out fixed files for {file_name1}.")
                            # Split into one dataset per output period.
                            years, datasets = zip(*ds.resample(time=freq))
                            if freq == "MS":
                                format_str = "%Y-%m"
                                iterable_chunks = 36
                            else:
                                format_str = "%Y"
                                iterable_chunks = 10
                            out_filenames = [
                                output_folder.joinpath(
                                    f"{file_name1}_{xr.DataArray(year).dt.strftime(format_str).values}{suffix}"
                                )
                                for year in years
                            ]
                            jobs = list()
                            if output_format != "zarr" and overwrite:
                                logging.warning(
                                    f"Removing existing {output_format} files for {var}."
                                )
                            for i, d in enumerate(datasets):
                                # netCDF overwrite: unlink the old file first.
                                if (
                                    out_filenames[i].exists()
                                    and out_filenames[i].is_file()
                                    and overwrite
                                ):
                                    out_filenames[i].unlink()
                                if not out_filenames[i].exists() or (
                                    out_filenames[i].is_dir() and overwrite
                                ):
                                    jobs.append(
                                        delayed_write(
                                            d,
                                            out_filenames[i],
                                            output_format,
                                            overwrite,
                                            target_chunks=output_chunks,
                                        )
                                    )
                            if len(jobs) == 0:
                                logging.warning(
                                    f"All output files for `{var}` currently exist."
                                    " To overwrite them, set `overwrite=True`. Continuing..."
                                )
                            else:
                                # Compute delayed writes in bounded batches to
                                # limit the dask graph size per compute() call.
                                chunked_jobs = chunk_iterables(jobs, iterable_chunks)
                                logging.info(f"Processing jobs for variable `{var}`.")
                                iterations = 0
                                for chunk in chunked_jobs:
                                    iterations += 1
                                    logging.info(f"Writing out job chunk {iterations}.")
                                    compute(chunk)
                    else:
                        logging.info(f"No files found for variable {var}.")
|
987,841 | 7ec6461ee9bc80e0c036c787a943b2211c320c60 | # Generated by Django 2.0.9 on 2019-05-16 21:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: narrows the `choices` list of
    # the App.last_editor CharField to the current set of editor names.
    dependencies = [
        # Must apply after the previous auto-generated migration of this app.
        ('apps', '0038_auto_20190516_1522'),
    ]
    operations = [
        migrations.AlterField(
            model_name='app',
            name='last_editor',
            field=models.CharField(choices=[('Benjamin Smith', 'Benjamin Smith'), ('Benjamin serepes', 'Benjamin serepes'), ('Christopher Daughton', 'Christopher Daughton'), ('Jarom Hlebasko', 'Jarom Hlebasko'), ('John McLaughlin', 'John McLaughlin'), ('Rafal Rudzinski', 'Rafal Rudzinski')], max_length=100, verbose_name='Last Editor'),
        ),
    ]
|
987,842 | 216f911fdbb7b94275c01a11ccfc0cb54b704b8c | # coding: utf-8
import json
import datetime
from dateutil.tz import tzutc
import responses
import ibm_watson
from ibm_watson import ApiException
from ibm_watson.assistant_v1 import Context, Counterexample, \
CounterexampleCollection, Entity, EntityCollection, Example, \
ExampleCollection, MessageInput, Intent, IntentCollection, Synonym, \
SynonymCollection, Value, ValueCollection, Workspace, WorkspaceCollection
from ibm_cloud_sdk_core.authenticators import BasicAuthenticator
# Base endpoint of the (legacy) Watson Assistant gateway; every test below
# registers mocked HTTP responses under this URL.
platform_url = 'https://gateway.watsonplatform.net'
service_path = '/assistant/api'
base_url = '{0}{1}'.format(platform_url, service_path)
#########################
# counterexamples
#########################
@responses.activate
def test_create_counterexample():
    """create_counterexample POSTs to the workspace endpoint and returns the created object."""
    url = base_url + '/v1/workspaces/boguswid/counterexamples'
    mock = {
        "text": "I want financial advice today.",
        "created": "2016-07-11T16:39:01.774Z",
        "updated": "2015-12-07T18:53:59.153Z"
    }
    responses.add(responses.POST, url, body=json.dumps(mock),
                  status=201, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    service.set_service_url(base_url)
    result = service.create_counterexample(
        workspace_id='boguswid',
        text='I want financial advice today.').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the model class.
    Counterexample._from_dict(result)
@responses.activate
def test_rate_limit_exceeded():
    """An HTTP 429 from the service surfaces as an ApiException with code and message."""
    url = base_url + '/v1/workspaces/boguswid/counterexamples'
    responses.add(responses.POST, url, body='Rate limit exceeded',
                  status=429, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    try:
        service.create_counterexample(
            workspace_id='boguswid', text='I want financial advice today.')
    except ApiException as ex:
        assert len(responses.calls) == 1
        assert isinstance(ex, ApiException)
        assert ex.code == 429
        assert 'Rate limit exceeded' in str(ex)
@responses.activate
def test_unknown_error():
    """A status code the SDK has no message for reports 'Unknown error'."""
    url = base_url + '/v1/workspaces/boguswid/counterexamples'
    # No body registered: the SDK must fall back to its generic message.
    responses.add(responses.POST, url, status=407,
                  content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    try:
        service.create_counterexample(
            workspace_id='boguswid', text='I want financial advice today.')
    except ApiException as ex:
        assert len(responses.calls) == 1
        assert 'Unknown error' in str(ex)
@responses.activate
def test_delete_counterexample():
    """delete_counterexample issues a DELETE and yields no body on 204."""
    url = base_url + ('/v1/workspaces/boguswid/counterexamples/'
                      'I%20want%20financial%20advice%20today')
    responses.add(responses.DELETE, url, body=None, status=204,
                  content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.delete_counterexample(
        workspace_id='boguswid',
        text='I want financial advice today').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result is None
@responses.activate
def test_get_counterexample():
    """get_counterexample GETs a single counterexample by (URL-encoded) text."""
    url = base_url + ('/v1/workspaces/boguswid/counterexamples/'
                      'What%20are%20you%20wearing%3F')
    mock = {
        "text": "What are you wearing?",
        "created": "2016-07-11T23:53:59.153Z",
        "updated": "2016-12-07T18:53:59.153Z"
    }
    responses.add(responses.GET, url, body=json.dumps(mock),
                  status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.get_counterexample(
        workspace_id='boguswid', text='What are you wearing?').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the model class.
    Counterexample._from_dict(result)
@responses.activate
def test_list_counterexamples():
    """list_counterexamples returns the paginated collection unchanged."""
    url = base_url + '/v1/workspaces/boguswid/counterexamples'
    mock = {
        "counterexamples": [{
            "text": "I want financial advice today.",
            "created": "2016-07-11T16:39:01.774Z",
            "updated": "2015-12-07T18:53:59.153Z"
        }, {
            "text": "What are you wearing today",
            "created": "2016-07-11T16:39:01.774Z",
            "updated": "2015-12-07T18:53:59.153Z"
        }],
        "pagination": {
            "refresh_url":
                "/v1/workspaces/pizza_app-e0f3/counterexamples?version=2017-12-18&page_limit=2",
            "next_url":
                "/v1/workspaces/pizza_app-e0f3/counterexamples?cursor=base64=&version=2017-12-18&page_limit=2"
        }
    }
    responses.add(responses.GET, url, body=json.dumps(mock),
                  status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.list_counterexamples(workspace_id='boguswid').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the collection model.
    CounterexampleCollection._from_dict(result)
@responses.activate
def test_update_counterexample():
    """update_counterexample POSTs to the item URL and returns the updated object."""
    url = base_url + ('/v1/workspaces/boguswid/counterexamples/'
                      'What%20are%20you%20wearing%3F')
    mock = {
        "text": "What are you wearing?",
        "created": "2016-07-11T23:53:59.153Z",
        "updated": "2015-12-07T18:53:59.153Z"
    }
    responses.add(responses.POST, url, body=json.dumps(mock),
                  status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.update_counterexample(
        workspace_id='boguswid',
        text='What are you wearing?',
        new_text='What are you wearing?').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the model class.
    Counterexample._from_dict(result)
#########################
# entities
#########################
@responses.activate
def test_create_entity():
    """create_entity POSTs the entity definition and returns the created entity."""
    url = base_url + '/v1/workspaces/boguswid/entities'
    mock = {
        "entity": "pizza_toppings",
        "description": "Tasty pizza toppings",
        "created": "2015-12-06T04:32:20.000Z",
        "updated": "2015-12-07T18:53:59.153Z",
        "metadata": {
            "property": "value"
        }
    }
    responses.add(responses.POST, url, body=json.dumps(mock),
                  status=201, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.create_entity(
        workspace_id='boguswid',
        entity='pizza_toppings',
        description='Tasty pizza toppings',
        metadata={"property": "value"},
        values=None,
        fuzzy_match=None).get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the model class.
    Entity._from_dict(result)
@responses.activate
def test_delete_entity():
    """delete_entity issues a DELETE; the mocked 200 carries an empty JSON string."""
    url = base_url + '/v1/workspaces/boguswid/entities/pizza_toppings'
    responses.add(responses.DELETE, url, body=json.dumps(""),
                  status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.delete_entity(
        workspace_id='boguswid', entity='pizza_toppings').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == ""
@responses.activate
def test_get_entity():
    """get_entity GETs an entity (export=True) and returns it unchanged."""
    url = base_url + '/v1/workspaces/boguswid/entities/pizza_toppings'
    mock = {
        "entity": "pizza_toppings",
        "description": "Tasty pizza toppings",
        "created": "2015-12-06T04:32:20.000Z",
        "updated": "2015-12-07T18:53:59.153Z",
        "metadata": {
            "property": "value"
        }
    }
    responses.add(responses.GET, url, body=json.dumps(mock),
                  status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.get_entity(
        workspace_id='boguswid', entity='pizza_toppings',
        export=True).get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the model class.
    Entity._from_dict(result)
@responses.activate
def test_list_entities():
    """list_entities returns the paginated entity collection unchanged."""
    url = base_url + '/v1/workspaces/boguswid/entities'
    mock = {
        "entities": [{
            "entity": "pizza_toppings",
            "description": "Tasty pizza toppings",
            "created": "2015-12-06T04:32:20.000Z",
            "updated": "2015-12-07T18:53:59.153Z",
            "metadata": {
                "property": "value"
            }
        }],
        "pagination": {
            "refresh_url":
                "/v1/workspaces/pizza_app-e0f3/entities?version=2017-12-18&filter=name:pizza&include_count=true&page_limit=1",
            "next_url":
                "/v1/workspaces/pizza_app-e0f3/entities?cursor=base64=&version=2017-12-18&filter=name:pizza&page_limit=1",
            "total": 1,
            "matched": 1
        }
    }
    responses.add(responses.GET, url, body=json.dumps(mock),
                  status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.list_entities(
        workspace_id='boguswid', export=True).get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the collection model.
    EntityCollection._from_dict(result)
@responses.activate
def test_update_entity():
    """update_entity POSTs to the entity URL and returns the updated entity."""
    url = base_url + '/v1/workspaces/boguswid/entities/pizza_toppings'
    mock = {
        "entity": "pizza_toppings",
        "description": "Tasty pizza toppings",
        "created": "2015-12-06T04:32:20.000Z",
        "updated": "2015-12-07T18:53:59.153Z",
        "metadata": {
            "property": "value"
        }
    }
    responses.add(responses.POST, url, body=json.dumps(mock),
                  status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.update_entity(
        workspace_id='boguswid',
        entity='pizza_toppings',
        new_entity='pizza_toppings').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the model class.
    Entity._from_dict(result)
#########################
# examples
#########################
@responses.activate
def test_create_example():
    """create_example POSTs an example (with mentions) under an intent."""
    url = base_url + '/v1/workspaces/boguswid/intents/pizza_order/examples'
    mock = {
        "text": "Gimme a pizza with pepperoni",
        "created": "2016-07-11T16:39:01.774Z",
        "updated": "2015-12-07T18:53:59.153Z"
    }
    responses.add(responses.POST, url, body=json.dumps(mock),
                  status=201, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.create_example(
        workspace_id='boguswid',
        intent='pizza_order',
        text='Gimme a pizza with pepperoni',
        mentions=[{'entity': 'xxx', 'location': [0, 1]}]).get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the model class.
    Example._from_dict(result)
@responses.activate
def test_delete_example():
    """delete_example issues a DELETE and yields no body on 204."""
    url = base_url + ('/v1/workspaces/boguswid/intents/pizza_order/examples/'
                      'Gimme%20a%20pizza%20with%20pepperoni')
    responses.add(responses.DELETE, url, body=json.dumps({}),
                  status=204, content_type='')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.delete_example(
        workspace_id='boguswid',
        intent='pizza_order',
        text='Gimme a pizza with pepperoni').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result is None
@responses.activate
def test_get_example():
    """get_example GETs a single example by (URL-encoded) text."""
    url = base_url + ('/v1/workspaces/boguswid/intents/pizza_order/examples/'
                      'Gimme%20a%20pizza%20with%20pepperoni')
    mock = {
        "text": "Gimme a pizza with pepperoni",
        "created": "2016-07-11T23:53:59.153Z",
        "updated": "2016-12-07T18:53:59.153Z"
    }
    responses.add(responses.GET, url, body=json.dumps(mock),
                  status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.get_example(
        workspace_id='boguswid',
        intent='pizza_order',
        text='Gimme a pizza with pepperoni').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the model class.
    Example._from_dict(result)
@responses.activate
def test_list_examples():
    """list_examples returns the paginated example collection unchanged."""
    url = base_url + '/v1/workspaces/boguswid/intents/pizza_order/examples'
    mock = {
        "examples": [{
            "text": "Can I order a pizza?",
            "created": "2016-07-11T16:39:01.774Z",
            "updated": "2015-12-07T18:53:59.153Z"
        }, {
            "text": "Gimme a pizza with pepperoni",
            "created": "2016-07-11T16:39:01.774Z",
            "updated": "2015-12-07T18:53:59.153Z"
        }],
        "pagination": {
            "refresh_url":
                "/v1/workspaces/pizza_app-e0f3/intents/order/examples?version=2017-12-18&page_limit=2",
            "next_url":
                "/v1/workspaces/pizza_app-e0f3/intents/order/examples?cursor=base64=&version=2017-12-18&page_limit=2"
        }
    }
    responses.add(responses.GET, url, body=json.dumps(mock),
                  status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.list_examples(
        workspace_id='boguswid', intent='pizza_order').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the collection model.
    ExampleCollection._from_dict(result)
@responses.activate
def test_update_example():
    """update_example POSTs new text/mentions and returns the updated example."""
    url = base_url + ('/v1/workspaces/boguswid/intents/pizza_order/examples/'
                      'Gimme%20a%20pizza%20with%20pepperoni')
    mock = {
        "text": "Gimme a pizza with pepperoni",
        "created": "2016-07-11T23:53:59.153Z",
        "updated": "2015-12-07T18:53:59.153Z"
    }
    responses.add(responses.POST, url, body=json.dumps(mock),
                  status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.update_example(
        workspace_id='boguswid',
        intent='pizza_order',
        text='Gimme a pizza with pepperoni',
        new_text='Gimme a pizza with pepperoni',
        new_mentions=[{'entity': 'xxx', 'location': [0, 1]}]).get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the model class.
    Example._from_dict(result)
#########################
# intents
#########################
@responses.activate
def test_create_intent():
    """create_intent POSTs the intent definition and returns the created intent."""
    url = base_url + '/v1/workspaces/boguswid/intents'
    mock = {
        "intent": "pizza_order",
        "created": "2015-12-06T23:53:59.153Z",
        "updated": "2015-12-07T18:53:59.153Z",
        "description": "User wants to start a new pizza order"
    }
    responses.add(responses.POST, url, body=json.dumps(mock),
                  status=201, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.create_intent(
        workspace_id='boguswid',
        intent='pizza_order',
        description='User wants to start a new pizza order').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the model class.
    Intent._from_dict(result)
@responses.activate
def test_delete_intent():
    """delete_intent issues a DELETE and yields no body on 204."""
    url = base_url + '/v1/workspaces/boguswid/intents/pizza_order'
    responses.add(responses.DELETE, url, body=json.dumps(None),
                  status=204, content_type='')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.delete_intent(
        workspace_id='boguswid', intent='pizza_order').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result is None
@responses.activate
def test_get_intent():
    """get_intent GETs a single intent (export=False) and returns it unchanged."""
    url = base_url + '/v1/workspaces/boguswid/intents/pizza_order'
    mock = {
        "intent": "pizza_order",
        "created": "2015-12-06T23:53:59.153Z",
        "updated": "2015-12-07T18:53:59.153Z",
        "description": "User wants to start a new pizza order"
    }
    responses.add(responses.GET, url, body=json.dumps(mock),
                  status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.get_intent(
        workspace_id='boguswid', intent='pizza_order',
        export=False).get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the model class.
    Intent._from_dict(result)
@responses.activate
def test_list_intents():
    """list_intents returns the paginated intent collection unchanged."""
    url = base_url + '/v1/workspaces/boguswid/intents'
    mock = {
        "intents": [{
            "intent": "pizza_order",
            "created": "2015-12-06T23:53:59.153Z",
            "updated": "2015-12-07T18:53:59.153Z",
            "description": "User wants to start a new pizza order"
        }],
        "pagination": {
            "refresh_url":
                "/v1/workspaces/pizza_app-e0f3/intents?version=2017-12-18&page_limit=1",
            "next_url":
                "/v1/workspaces/pizza_app-e0f3/intents?cursor=base64=&version=2017-12-18&page_limit=1"
        }
    }
    responses.add(responses.GET, url, body=json.dumps(mock),
                  status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.list_intents(
        workspace_id='boguswid', export=False).get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the collection model.
    IntentCollection._from_dict(result)
@responses.activate
def test_update_intent():
    """update_intent POSTs new name/description and returns the updated intent."""
    url = base_url + '/v1/workspaces/boguswid/intents/pizza_order'
    mock = {
        "intent": "pizza_order",
        "created": "2015-12-06T23:53:59.153Z",
        "updated": "2015-12-07T18:53:59.153Z",
        "description": "User wants to start a new pizza order"
    }
    responses.add(responses.POST, url, body=json.dumps(mock),
                  status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.update_intent(
        workspace_id='boguswid',
        intent='pizza_order',
        new_intent='pizza_order',
        new_description='User wants to start a new pizza order').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the model class.
    Intent._from_dict(result)
def test_intent_models():
    """An Intent model survives a _to_dict/_from_dict round trip."""
    # NOTE(review): microsecond=15300 does not match the ".153Z" timestamps
    # used elsewhere (153000 us); harmless here since only the round trip
    # is checked — confirm if these fixtures are ever compared.
    original = Intent(
        intent="pizza_order",
        created=datetime.datetime(2015, 12, 6, 23, 53, 59, 15300, tzinfo=tzutc()),
        updated=datetime.datetime(2015, 12, 7, 18, 53, 59, 15300, tzinfo=tzutc()),
        description="User wants to start a new pizza order")
    rebuilt = Intent._from_dict(original._to_dict())
    assert original == rebuilt
#########################
# logs
#########################
@responses.activate
def test_list_logs():
    """list_logs GETs a workspace's conversation logs and returns them unchanged."""
    endpoint = '/v1/workspaces/{0}/logs'.format('boguswid')
    url = '{0}{1}'.format(base_url, endpoint)
    # Representative log entry: one request/response exchange plus pagination.
    response = {
        "logs": [{
            "request": {
                "input": {
                    "text": "Can you turn off the AC"
                },
                "context": {
                    "conversation_id": "f2c7e362-4cc8-4761-8b0f-9ccd70c63bca",
                    "system": {}
                }
            },
            "response": {
                "input": {
                    "text": "Can you turn off the AC"
                },
                "context": {
                    "conversation_id": "f2c7e362-4cc8-4761-8b0f-9ccd70c63bca",
                    "system": {
                        "dialog_stack": ["root"],
                        "dialog_turn_counter": 1,
                        "dialog_request_counter": 1
                    },
                    "defaultCounter": 0
                },
                "entities": [],
                "intents": [{
                    "intent": "turn_off",
                    "confidence": 0.9332477126694649
                }],
                "output": {
                    "log_messages": [],
                    "text": [
                        "Hi. It looks like a nice drive today. What would you like me to do?"
                    ],
                    "nodes_visited": ["node_1_1467221909631"]
                }
            },
            "request_timestamp": "2016-07-16T09:22:38.960Z",
            "response_timestamp": "2016-07-16T09:22:39.011Z",
            "log_id": "e70d6c12-582d-47a8-a6a2-845120a1f232"
        }],
        "pagination": {
            "next_url":
            "/v1/workspaces/15fb0e8a-463d-4fec-86aa-a737d9c38a32/logs?cursor=dOfVSuh6fBpDuOxEL9m1S7JKDV7KLuBmRR+lQG1s1i/rVnBZ0ZBVCuy53ruHgPImC31gQv5prUsJ77e0Mj+6sGu/yfusHYF5&version=2016-07-11&filter=response.top_intent:turn_off&page_limit=1",
            "matched":
            215
        }
    }
    responses.add(
        responses.GET,
        url,
        body=json.dumps(response),
        status=200,
        content_type='application/json')
    authenticator = BasicAuthenticator('username', 'password')
    service = ibm_watson.AssistantV1(
        version='2017-02-03', authenticator=authenticator)
    logs = service.list_logs(
        workspace_id='boguswid').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert logs == response
@responses.activate
def test_list_all_logs():
    """list_all_logs GETs logs across workspaces using a filter expression."""
    endpoint = '/v1/logs'
    url = '{0}{1}'.format(base_url, endpoint)
    # Representative cross-workspace log entry with deployment metadata.
    response = {
        "logs": [{
            "request": {
                "input": {
                    "text": "Good morning"
                },
                "context": {
                    "metadata": {
                        "deployment": "deployment_1"
                    }
                }
            },
            "response": {
                "intents": [{
                    "intent": "hello",
                    "confidence": 1
                }],
                "entities": [],
                "input": {
                    "text": "Good morning"
                },
                "output": {
                    "text": ["Hi! What can I do for you?"],
                    "nodes_visited": ["node_2_1501875253968"],
                    "log_messages": []
                },
                "context": {
                    "metadata": {
                        "deployment": "deployment_1"
                    },
                    "conversation_id": "81a43b48-7dca-4a7d-a0d7-6fed03fcee69",
                    "system": {
                        "dialog_stack": [{
                            "dialog_node": "root"
                        }],
                        "dialog_turn_counter": 1,
                        "dialog_request_counter": 1,
                        "_node_output_map": {
                            "node_2_1501875253968": [0]
                        },
                        "branch_exited": True,
                        "branch_exited_reason": "completed"
                    }
                }
            },
            "language": "en",
            "workspace_id": "9978a49e-ea89-4493-b33d-82298d3db20d",
            "request_timestamp": "2017-09-13T19:52:32.611Z",
            "response_timestamp": "2017-09-13T19:52:32.628Z",
            "log_id": "aa886a8a-bac5-4b91-8323-2fd61a69c9d3"
        }],
        "pagination": {}
    }
    responses.add(
        responses.GET,
        url,
        body=json.dumps(response),
        status=200,
        content_type='application/json')
    authenticator = BasicAuthenticator('username', 'password')
    service = ibm_watson.AssistantV1(
        version='2017-02-03', authenticator=authenticator)
    # Positional argument is the `filter` expression.
    logs = service.list_all_logs(
        'language::en,request.context.metadata.deployment::deployment_1').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert logs == response
#########################
# message
#########################
@responses.activate
def test_message():
    """message() POSTs user input to the workspace message endpoint.

    Covers a context-free first call and a follow-up call with an explicit
    (JSON-serialized) context, and checks that default headers set on the
    service are forwarded with the request.
    """
    authenticator = BasicAuthenticator('username', 'password')
    assistant = ibm_watson.AssistantV1(
        version='2017-02-03', authenticator=authenticator)
    assistant.set_default_headers({'x-watson-learning-opt-out': "true"})
    workspace_id = 'f8fdbc65-e0bd-4e43-b9f8-2975a366d4ec'
    message_url = '%s/v1/workspaces/%s/message' % (base_url, workspace_id)
    url1_str = '%s/v1/workspaces/%s/message?version=2017-02-03'
    message_url1 = url1_str % (base_url, workspace_id)
    message_response = {
        "context": {
            "conversation_id": "1b7b67c0-90ed-45dc-8508-9488bc483d5b",
            "system": {
                "dialog_stack": ["root"],
                "dialog_turn_counter": 1,
                "dialog_request_counter": 1
            }
        },
        "intents": [],
        "entities": [],
        "input": {},
        "output": {
            "text": "okay",
            "log_messages": []
        }
    }
    responses.add(
        responses.POST,
        message_url,
        body=json.dumps(message_response),
        status=200,
        content_type='application/json')
    message = assistant.message(
        workspace_id=workspace_id,
        input={'text': 'Turn on the lights'},
        context=None).get_result()
    assert message is not None
    assert responses.calls[0].request.url == message_url1
    assert 'x-watson-learning-opt-out' in responses.calls[0].request.headers
    assert responses.calls[0].request.headers['x-watson-learning-opt-out'] == 'true'
    assert responses.calls[0].response.text == json.dumps(message_response)
    # test context
    # BUG FIX: `body` must be a serialized string; the original passed the
    # raw dict, which the `responses` library rejects (body must be
    # str/bytes), so the second mocked call could never be served.
    responses.add(
        responses.POST,
        message_url,
        body=json.dumps(message_response),
        status=200,
        content_type='application/json')
    message_ctx = {
        'context': {
            'conversation_id': '1b7b67c0-90ed-45dc-8508-9488bc483d5b',
            'system': {
                'dialog_stack': ['root'],
                'dialog_turn_counter': 2,
                'dialog_request_counter': 1
            }
        }
    }
    message = assistant.message(
        workspace_id=workspace_id,
        input={'text': 'Turn on the lights'},
        context=json.dumps(message_ctx['context'])).get_result()
    assert message is not None
    assert responses.calls[1].request.url == message_url1
    assert responses.calls[1].response.text == json.dumps(message_response)
    assert len(responses.calls) == 2
@responses.activate
def test_message_with_models():
    """message() also accepts model objects (MessageInput, Context).

    Mirrors test_message but passes typed SDK models instead of plain dicts.
    """
    authenticator = BasicAuthenticator('username', 'password')
    assistant = ibm_watson.AssistantV1(
        version='2017-02-03', authenticator=authenticator)
    assistant.set_default_headers({'x-watson-learning-opt-out': "true"})
    workspace_id = 'f8fdbc65-e0bd-4e43-b9f8-2975a366d4ec'
    message_url = '%s/v1/workspaces/%s/message' % (base_url, workspace_id)
    url1_str = '%s/v1/workspaces/%s/message?version=2017-02-03'
    message_url1 = url1_str % (base_url, workspace_id)
    message_response = {
        "context": {
            "conversation_id": "1b7b67c0-90ed-45dc-8508-9488bc483d5b",
            "system": {
                "dialog_stack": ["root"],
                "dialog_turn_counter": 1,
                "dialog_request_counter": 1
            }
        },
        "intents": [],
        "entities": [],
        "input": {},
        "output": {
            "text": "okay",
            "log_messages": []
        }
    }
    responses.add(
        responses.POST,
        message_url,
        body=json.dumps(message_response),
        status=200,
        content_type='application/json')
    message = assistant.message(
        workspace_id=workspace_id,
        input=MessageInput(text='Turn on the lights'),
        context=None).get_result()
    assert message is not None
    assert responses.calls[0].request.url == message_url1
    assert 'x-watson-learning-opt-out' in responses.calls[0].request.headers
    assert responses.calls[0].request.headers['x-watson-learning-opt-out'] == 'true'
    assert responses.calls[0].response.text == json.dumps(message_response)
    # test context
    # BUG FIX: `body` must be a serialized string, not the raw dict (the
    # `responses` library only accepts str/bytes bodies).
    responses.add(
        responses.POST,
        message_url,
        body=json.dumps(message_response),
        status=200,
        content_type='application/json')
    message_ctx = Context._from_dict(message_response['context'])
    message = assistant.message(
        workspace_id=workspace_id,
        input=MessageInput(text='Turn on the lights'),
        context=message_ctx).get_result()
    assert message is not None
    assert responses.calls[1].request.url == message_url1
    assert responses.calls[1].response.text == json.dumps(message_response)
    assert len(responses.calls) == 2
#########################
# synonyms
#########################
@responses.activate
def test_create_synonym():
    """create_synonym POSTs a synonym under an entity value."""
    url = base_url + '/v1/workspaces/boguswid/entities/aeiou/values/vowel/synonyms'
    mock = {
        "synonym": "aeiou",
        "created": "2000-01-23T04:56:07.000+00:00",
        "updated": "2000-01-23T04:56:07.000+00:00"
    }
    responses.add(responses.POST, url, body=json.dumps(mock),
                  status=201, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.create_synonym(
        workspace_id='boguswid', entity='aeiou', value='vowel',
        synonym='a').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the model class.
    Synonym._from_dict(result)
@responses.activate
def test_delete_synonym():
    """delete_synonym issues a DELETE and yields no body on 204."""
    url = base_url + '/v1/workspaces/boguswid/entities/aeiou/values/vowel/synonyms/a'
    responses.add(responses.DELETE, url, body=json.dumps(None),
                  status=204, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.delete_synonym(
        workspace_id='boguswid', entity='aeiou', value='vowel',
        synonym='a').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result is None
@responses.activate
def test_get_synonym():
    """get_synonym GETs a single synonym and returns it unchanged."""
    url = base_url + ('/v1/workspaces/boguswid/entities/grilling/values/bbq/'
                      'synonyms/barbecue')
    mock = {
        "synonym": "barbecue",
        "created": "2015-12-06T23:53:59.153Z",
        "updated": "2015-12-07T18:53:59.153Z"
    }
    responses.add(responses.GET, url, body=json.dumps(mock),
                  status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.get_synonym(
        workspace_id='boguswid', entity='grilling', value='bbq',
        synonym='barbecue').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the model class.
    Synonym._from_dict(result)
@responses.activate
def test_list_synonyms():
    """list_synonyms returns the paginated synonym collection unchanged."""
    url = base_url + '/v1/workspaces/boguswid/entities/grilling/values/bbq/synonyms'
    mock = {
        "synonyms": [{
            "synonym": "BBQ sauce",
            "created": "2015-12-06T23:53:59.153Z",
            "updated": "2015-12-07T18:53:59.153Z"
        }, {
            "synonym": "barbecue",
            "created": "2015-12-06T23:53:59.153Z",
            "updated": "2015-12-07T18:53:59.153Z"
        }],
        "pagination": {
            "refresh_url":
                "/v1/workspaces/pizza_app-e0f3/entities/sauce/values/types/synonyms?version=2017-12-18&filter=name:b&include_count=true&page_limit=2",
            "next_url":
                "/v1/workspaces/pizza_app-e0f3/entities/sauce/values/types/synonyms?cursor=base64=&version=2017-12-18&filter=name:b&page_limit=2",
            "total": 8,
            "matched": 2
        }
    }
    responses.add(responses.GET, url, body=json.dumps(mock),
                  status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.list_synonyms(
        workspace_id='boguswid',
        entity='grilling',
        value='bbq').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == mock
    # The payload must round-trip into the collection model.
    SynonymCollection._from_dict(result)
@responses.activate
def test_update_synonym():
    """POST to the synonym endpoint updates it and returns the new payload."""
    url = base_url + '/v1/workspaces/boguswid/entities/grilling/values/bbq/synonyms/barbecue'
    payload = {
        "synonym": "barbecue",
        "created": "2015-12-06T23:53:59.153Z",
        "updated": "2015-12-07T18:53:59.153Z",
    }
    responses.add(
        responses.POST, url,
        body=json.dumps(payload), status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.update_synonym(
        workspace_id='boguswid', entity='grilling', value='bbq',
        synonym='barbecue', new_synonym='barbecue').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == payload
    # Schema sanity check.
    Synonym._from_dict(result)
#########################
# values
#########################
@responses.activate
def test_create_value():
    """POST to the values endpoint creates a value (201) and returns it."""
    url = base_url + '/v1/workspaces/boguswid/entities/grilling/values'
    payload = {
        "metadata": "{}",
        "created": "2000-01-23T04:56:07.000+00:00",
        "value": "aeiou",
        "type": "synonyms",
        "updated": "2000-01-23T04:56:07.000+00:00",
    }
    responses.add(
        responses.POST, url,
        body=json.dumps(payload), status=201, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.create_value(
        workspace_id='boguswid', entity='grilling', value='aeiou').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == payload
    # Schema sanity check.
    Value._from_dict(result)
@responses.activate
def test_delete_value():
    """DELETE value endpoint is hit once; mocked body is an empty JSON string."""
    url = base_url + '/v1/workspaces/boguswid/entities/grilling/values/bbq'
    responses.add(
        responses.DELETE, url,
        body=json.dumps(""), status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.delete_value(
        workspace_id='boguswid', entity='grilling', value='bbq').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == ""
@responses.activate
def test_get_value():
    """GET value (export=True) returns the mocked payload."""
    url = base_url + '/v1/workspaces/boguswid/entities/grilling/values/bbq'
    payload = {
        "value": "BBQ sauce",
        "metadata": {"code": 1422},
        "type": "synonyms",
        "created": "2015-12-06T23:53:59.153Z",
        "updated": "2015-12-07T18:53:59.153Z",
    }
    responses.add(
        responses.GET, url,
        body=json.dumps(payload), status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.get_value(
        workspace_id='boguswid', entity='grilling',
        value='bbq', export=True).get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == payload
    # Schema sanity check.
    Value._from_dict(result)
@responses.activate
def test_list_values():
    """GET values collection (export=True) returns the mocked page."""
    url = base_url + '/v1/workspaces/boguswid/entities/grilling/values'
    payload = {
        "values": [
            {"value": "BBQ sauce",
             "metadata": {"code": 1422},
             "type": "synonyms",
             "created": "2015-12-06T23:53:59.153Z",
             "updated": "2015-12-07T18:53:59.153Z"},
        ],
        "pagination": {
            "refresh_url":
                "/v1/workspaces/pizza_app-e0f3/entities/sauce/values?version=2017-12-18&filter=name:pizza&include_count=true&page_limit=1",
            "next_url":
                "/v1/workspaces/pizza_app-e0f3/sauce/values?cursor=base64=&version=2017-12-18&filter=name:pizza&page_limit=1",
            "total": 1,
            "matched": 1,
        },
    }
    responses.add(
        responses.GET, url,
        body=json.dumps(payload), status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.list_values(
        workspace_id='boguswid', entity='grilling', export=True).get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == payload
    # Schema sanity check against the collection model class.
    ValueCollection._from_dict(result)
@responses.activate
def test_update_value():
    """POST to a value endpoint updates name/metadata/synonyms and echoes back."""
    url = base_url + '/v1/workspaces/boguswid/entities/grilling/values/bbq'
    payload = {
        "value": "BBQ sauce",
        "metadata": {"code": 1422},
        "type": "synonyms",
        "created": "2015-12-06T23:53:59.153Z",
        "updated": "2015-12-06T23:53:59.153Z",
    }
    responses.add(
        responses.POST, url,
        body=json.dumps(payload), status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.update_value(
        workspace_id='boguswid',
        entity='grilling',
        value='bbq',
        new_value='BBQ sauce',
        new_metadata={"code": 1422},
        new_synonyms=None).get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == payload
    # Schema sanity check.
    Value._from_dict(result)
#########################
# workspaces
#########################
@responses.activate
def test_create_workspace():
    """POST /v1/workspaces creates a workspace (201) and returns it."""
    url = base_url + '/v1/workspaces'
    payload = {
        "name": "Pizza app",
        "created": "2015-12-06T23:53:59.153Z",
        "language": "en",
        "metadata": {},
        "updated": "2015-12-06T23:53:59.153Z",
        "description": "Pizza app",
        "workspace_id": "pizza_app-e0f3",
        "learning_opt_out": True,
    }
    responses.add(
        responses.POST, url,
        body=json.dumps(payload), status=201, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.create_workspace(
        name='Pizza app', description='Pizza app', language='en', metadata={},
        system_settings={'tooling': {'store_generic_responses': True}}).get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == payload
    # Schema sanity check.
    Workspace._from_dict(result)
@responses.activate
def test_delete_workspace():
    """DELETE workspace returns 204 with no content; result is None."""
    url = base_url + '/v1/workspaces/boguswid'
    responses.add(
        responses.DELETE, url,
        body=json.dumps({}), status=204, content_type='')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.delete_workspace(workspace_id='boguswid').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result is None
@responses.activate
def test_get_workspace():
    """GET workspace (export=True, sort='stable') returns the mocked payload."""
    url = base_url + '/v1/workspaces/boguswid'
    payload = {
        "name": "Pizza app",
        "created": "2015-12-06T23:53:59.153Z",
        "language": "en",
        "metadata": {},
        "updated": "2015-12-06T23:53:59.153Z",
        "description": "Pizza app",
        "status": "Available",
        "learning_opt_out": False,
        "workspace_id": "pizza_app-e0f3",
    }
    responses.add(
        responses.GET, url,
        body=json.dumps(payload), status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.get_workspace(
        workspace_id='boguswid', export=True, sort='stable').get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == payload
    # Schema sanity check.
    Workspace._from_dict(result)
@responses.activate
def test_list_workspaces():
    """GET /v1/workspaces returns the mocked workspace collection."""
    url = base_url + '/v1/workspaces'
    payload = {
        "workspaces": [
            {"name": "Pizza app",
             "created": "2015-12-06T23:53:59.153Z",
             "language": "en",
             "metadata": {},
             "updated": "2015-12-06T23:53:59.153Z",
             "description": "Pizza app",
             "workspace_id": "pizza_app-e0f3",
             "learning_opt_out": True},
        ],
        "pagination": {
            "refresh_url": "/v1/workspaces?version=2016-01-24&page_limit=1",
            "next_url": "/v1/workspaces?cursor=base64=&version=2016-01-24&page_limit=1",
        },
    }
    responses.add(
        responses.GET, url,
        body=json.dumps(payload), status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.list_workspaces().get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == payload
    # Schema sanity check against the collection model class.
    WorkspaceCollection._from_dict(result)
@responses.activate
def test_update_workspace():
    """POST to a workspace endpoint updates it and returns the new payload."""
    url = base_url + '/v1/workspaces/pizza_app-e0f3'
    payload = {
        "name": "Pizza app",
        "created": "2015-12-06T23:53:59.153Z",
        "language": "en",
        "metadata": {},
        "updated": "2015-12-06T23:53:59.153Z",
        "description": "Pizza app",
        "workspace_id": "pizza_app-e0f3",
        "learning_opt_out": True,
    }
    responses.add(
        responses.POST, url,
        body=json.dumps(payload), status=200, content_type='application/json')
    service = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    result = service.update_workspace(
        workspace_id='pizza_app-e0f3',
        name='Pizza app',
        description='Pizza app',
        language='en',
        metadata={},
        system_settings={'tooling': {'store_generic_responses': True}}).get_result()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(url)
    assert result == payload
    # Schema sanity check.
    Workspace._from_dict(result)
@responses.activate
def test_dialog_nodes():
    """Exercises create / delete / get / list dialog nodes — four HTTP calls."""
    url = 'https://gateway.watsonplatform.net/assistant/api/v1/workspaces/id/dialog_nodes'
    responses.add(
        responses.GET, url,
        body='{ "application/json": { "dialog_node": "location-atm" }}',
        status=200, content_type='application/json')
    responses.add(
        responses.POST, url + '?version=2017-02-03',
        body='{ "application/json": { "dialog_node": "location-done" }}',
        status=200, content_type='application/json')
    responses.add(
        responses.DELETE, url + '/location-done?version=2017-02-03',
        body='{"description": "deleted successfully"}',
        status=200, content_type='application/json')
    responses.add(
        responses.GET, url + '/location-done?version=2017-02-03',
        body='{ "application/json": { "dialog_node": "location-atm" }}',
        status=200, content_type='application/json')
    assistant = ibm_watson.AssistantV1(
        version='2017-02-03',
        authenticator=BasicAuthenticator('username', 'password'))
    # Each SDK call should consume the matching mock, in order.
    assistant.create_dialog_node('id', 'location-done', user_label='xxx')
    assert responses.calls[0].response.json()['application/json']['dialog_node'] == 'location-done'
    assistant.delete_dialog_node('id', 'location-done')
    assert responses.calls[1].response.json() == {"description": "deleted successfully"}
    assistant.get_dialog_node('id', 'location-done')
    assert responses.calls[2].response.json() == {"application/json": {"dialog_node": "location-atm"}}
    assistant.list_dialog_nodes('id')
    assert responses.calls[3].response.json() == {"application/json": {"dialog_node": "location-atm"}}
    assert len(responses.calls) == 4
@responses.activate
def test_delete_user_data():
    """delete_user_data issues one DELETE and returns None on 204."""
    url = 'https://gateway.watsonplatform.net/assistant/api/v1/user_data'
    responses.add(
        responses.DELETE,
        url,
        body=None,
        status=204,
        # Fixed: was 'application_json' (typo; standard MIME type uses a slash).
        content_type='application/json')
    authenticator = BasicAuthenticator('username', 'password')
    service = ibm_watson.AssistantV1(
        version='2017-02-03', authenticator=authenticator)
    response = service.delete_user_data('id').get_result()
    assert response is None
    assert len(responses.calls) == 1
@responses.activate
def test_list_mentions():
    """list_mentions issues one GET and returns the parsed JSON list."""
    url = 'https://gateway.watsonplatform.net/assistant/api/v1/workspaces/workspace_id/entities/entity1/mentions'
    responses.add(
        responses.GET,
        url,
        body='[{"entity": "xxx"}]',
        status=200,
        # Fixed: was 'application_json' (typo; standard MIME type uses a slash).
        content_type='application/json')
    authenticator = BasicAuthenticator('username', 'password')
    service = ibm_watson.AssistantV1(
        version='2017-02-03', authenticator=authenticator)
    response = service.list_mentions('workspace_id', 'entity1').get_result()
    assert response == [{"entity": "xxx"}]
    assert len(responses.calls) == 1
|
987,843 | b6841397e45fda7010b20d00d9df975636b198eb | # Generated by Django 2.0.1 on 2018-02-20 15:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.0.1): relax three fields on the recruiting app.

    Makes Answer.long_answer / Answer.short_answer and
    ApplicantResume.image optional (blank=True, null=True).
    NOTE(review): generated file — do not hand-edit field definitions.
    """

    dependencies = [
        ('recruiting', '0003_auto_20180216_2214'),
    ]

    operations = [
        migrations.AlterField(
            model_name='answer',
            name='long_answer',
            field=models.TextField(blank=True, null=True, verbose_name='항목 답변 내용'),
        ),
        migrations.AlterField(
            model_name='answer',
            name='short_answer',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='질문 답변 내용'),
        ),
        migrations.AlterField(
            model_name='applicantresume',
            name='image',
            # upload_to='' stores files at the MEDIA_ROOT top level.
            field=models.ImageField(blank=True, null=True, upload_to='', verbose_name='지원서 사진'),
        ),
    ]
|
987,844 | 9ad2b0d30ff85cfdf30ecd9a138fc920bcc3e20d | def test_request_items_runner_fixture(testdir):
"""Make sure that pytest accepts our fixture."""
# create a temporary pytest test module
testdir.makepyfile(
"""
def test_exists(request_items_runner):
assert request_items_runner
"""
)
# run pytest with the following cmd args
result = testdir.runpytest("-v")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_exists PASSED*"])
# make sure that that we get a '0' exit code for the testsuite
assert result.ret == 0
def test_help_message(testdir):
    """All three plugin CLI options show up under the 'requests:' help group."""
    result = testdir.runpytest("--help")
    # fnmatch_lines asserts internally; one check per documented option.
    for option in ("--requests-baseurl", "--requests-timeout", "--requests-extra-vars"):
        result.stdout.fnmatch_lines(["requests:", "*{}*".format(option)])
|
987,845 | 5f59011b42d1c515f33ada68e31f0e527c884d1e | # 🚨 Don't change the code below 👇
two_digit_number = input("Type a two digit number: ")
# 🚨 Don't change the code above 👆
####################################
# Write your code below this line 👇

# Take the two characters of the input, convert each to an int, and add them.
tens_char = two_digit_number[0]
units_char = two_digit_number[1]
print(int(tens_char) + int(units_char))
|
987,846 | 302664130552e9c9dd7a7f0e93422856461f358a | routers = dict(
# base router
BASE=dict(
default_application='blog',
),
blog=dict(
default_controller='initial',
default_function='home',
functions=['home', 'contact'],
)
)
|
987,847 | 3804238fcc0247da53c61db1c2374d2e920e3664 | import tensorflow as tf
import numpy as np
from viz import viz_utils
from architectures import th_utils
FLAGS = tf.flags.FLAGS
def gen_composed_hierarchical_seqs(low_level_seqs, dt, n_seqs):
    """Stitch per-segment frame sequences into full sequences, per batch element.

    For each of the first `n_seqs` batch elements, segment `i` contributes its
    frames up to (and including) the argmax of dt[i] (the predicted segment
    length); segments are concatenated in order along the time axis.

    Args:
        low_level_seqs: array indexed [segment, frame, batch, ...] of frames.
        dt: array of shape (n_segments, batch_size, segment_len) — per-frame
            segment-length weights; argmax over the last axis picks the cut.
        n_seqs: number of batch elements to compose (must be <= batch_size).

    Returns:
        (output_seqs, keyframe_idxs): a list of composed frame arrays and, per
        sequence, the time index of each segment's last frame.
    """
    n_segs, batch_size, seg_len = dt.shape
    # BUGFIX: was assert(cond, msg) — a non-empty tuple, which is always truthy,
    # so the bounds check never fired.
    assert n_seqs <= batch_size, "Number of requested vis seqs in larger than batch_size!"
    output_seqs = [None for _ in range(n_seqs)]
    keyframe_idxs = [[] for _ in range(n_seqs)]
    max_dt = np.argmax(dt, axis=2)
    for seq_idx in range(n_seqs):
        for seg_idx in range(n_segs):
            seg_subseq = low_level_seqs[seg_idx, :(max_dt[seg_idx, seq_idx]+1), seq_idx]
            if output_seqs[seq_idx] is None:
                output_seqs[seq_idx] = seg_subseq
            else:
                output_seqs[seq_idx] = np.concatenate((output_seqs[seq_idx], seg_subseq), axis=0)
            # The last frame of every segment is a keyframe.
            keyframe_idxs[seq_idx].append(output_seqs[seq_idx].shape[0]-1)
    return output_seqs, keyframe_idxs
def mark_keyframes(seqs, is_keyframe):
    """Paint a white 2-pixel border onto every frame flagged as a keyframe.

    Mutates `seqs` (shape [time, batch, channels, H, W]) in place and also
    returns it. `is_keyframe` is indexed [time, batch].
    """
    n_frames, n_seqs = seqs.shape[0], seqs.shape[1]
    for b in range(n_seqs):
        for t in range(n_frames):
            if not is_keyframe[t, b]:
                continue
            frame = seqs[t, b]          # view — writes hit `seqs` directly
            frame[:, :, :2] = 1.0       # left columns
            frame[:, :, -2:] = 1.0      # right columns
            frame[:, :2, :] = 1.0       # top rows
            frame[:, -2:, :] = 1.0      # bottom rows
    return seqs
def gen_hierarchical_plot_imgs(gt_seqs_condition, gt_seqs_predict, predicted_seqs,
                               target_seqs, keyframes, keyframe_idxs, attention_weights, gt_keyframe_idxs):
    """Assemble one comparison image per sequence: stacked rows of ground truth,
    (optionally) keyframes, predictions and targets, with an optional attention
    strip prepended.

    :param gt_seqs_condition: conditioning ground-truth frames (time-major).
    :param gt_seqs_predict: ground-truth frames for the prediction horizon.
    :param predicted_seqs: per-sequence list of predicted frame arrays.
    :param target_seqs: per-sequence list of target frames, or None.
    :param keyframes: keyframe images (indexed [kf, batch, ...]), or unused if
        keyframe_idxs is None.
    :param keyframe_idxs: per-sequence keyframe time indices, or None.
    :param attention_weights: array of n_keyframes x n_frames x batch_size, or None.
    :param gt_keyframe_idxs: boolean-ish array marking GT keyframes, or None.
    :return: list of stacked images (channel-last; channel dim dropped if 1).
    """
    show_gt_keyframes = gt_keyframe_idxs is not None and gt_keyframe_idxs.size > 0  # if exist
    if show_gt_keyframes:
        # mark_keyframes mutates in place; borders of GT keyframes painted white.
        gt_seqs_predict = mark_keyframes(gt_seqs_predict, gt_keyframe_idxs)
    gt_seqs = np.concatenate((gt_seqs_condition, gt_seqs_predict), axis=0)
    conditioning_offset = gt_seqs_condition.shape[0]
    output_imgs = []
    _, _, _, img_res, _ = gt_seqs.shape
    max_frames = FLAGS.n_frames_segment * FLAGS.n_segments + FLAGS.input_seq_len
    for seq_idx in range(len(predicted_seqs)):
        stack_list = []
        def pad_seq(seq, offset=1):
            # `offset` is multiplied in, so passing offset=False (==0) pins the
            # sequence at t=0 while offset=1 shifts past the conditioning frames.
            idxs = [i + conditioning_offset * offset for i in range(len(seq))]
            return viz_utils.pad_sequence(seq, idxs, max_frames)
        stack_list.append(pad_seq(gt_seqs[:, seq_idx], offset=False))
        # generate keyframe seq
        if keyframe_idxs is not None:
            keyframe_idxs_i = [i + conditioning_offset for i in keyframe_idxs[seq_idx]]
            keyframe_seq_i = viz_utils.pad_sequence(keyframes[:, seq_idx], keyframe_idxs_i, max_frames)
            stack_list.append(keyframe_seq_i)
        stack_list.append(pad_seq(predicted_seqs[seq_idx]))
        if target_seqs is not None:
            stack_list.append(pad_seq(target_seqs[seq_idx]))
            n_loss_frames = th_utils.get_future_loss_length() + conditioning_offset
        else:
            n_loss_frames = None
        # red_line marks where the loss horizon ends (None disables it).
        output_img = viz_utils.stack_seqs(stack_list, swap_channels=True, red_line=n_loss_frames)
        # Attention visualization
        if attention_weights is not None:
            n_keys, n_frames, _ = attention_weights.shape
            attention_seq = attention_weights[:, :max_frames, seq_idx]
            if FLAGS.use_full_inf:
                attention_idxs = np.arange(attention_seq.shape[1])
            else:
                attention_idxs = np.arange(attention_seq.shape[1]) + conditioning_offset
            attention_seq = viz_utils.pad_sequence((attention_seq.T),
                                                   attention_idxs,
                                                   max_frames,
                                                   color=0).T
            n_channels = output_img.shape[2]
            att_height = 2
            # Tile each scalar weight into an att_height x img_res bar so the
            # strip aligns with the frame columns below it.
            attention_seq = np.tile(attention_seq.reshape([n_keys, 1, max_frames, 1, 1]),
                                    [1, att_height, 1, img_res, n_channels]).reshape([n_keys * att_height, max_frames * img_res, n_channels])
            output_img = np.concatenate([attention_seq, output_img], axis=0)
        if output_img.shape[-1] == 1:
            output_img = output_img[..., 0]  # in case we have a grayscale image reduce channel dimension
        output_imgs.append(output_img)
    return output_imgs
def gen_segment_overviews(segments, dt, n_seqs):
n_seg, _, seg_len = dt.shape
output_imgs = []
for seq_idx in range(n_seqs):
seg_seq_stack = []
for seg_idx in range(n_seg):
segment = segments[seg_idx, :, seq_idx]
segment = np.transpose(segment, (0, 2, 3, 1)) # put channel to last dim
for seg_img_idx in range(seg_len):
segment[seg_img_idx, :2] = dt[seg_idx, seq_idx, seg_img_idx] # color top of img in gray indicating weight
segment = np.concatenate(np.split(segment, segment.shape[0], axis=0), axis=2)[0]
if segment.shape[-1] == 1:
segment = segment[..., 0] # remove last channel for grayscale img
seg_seq_stack.append(segment)
output_imgs.append(np.concatenate(seg_seq_stack, axis=0))
return output_imgs
def gen_html_summary(gif_frame_seqs, iteration, base_dir):
    """Dump the prediction GIFs into a per-iteration HTML page plus the
    parent summary page under `base_dir`.

    `gif_frame_seqs` are the concatenated frame sequences produced by
    viz_utils.create_concat_seqs.
    """
    webpage = viz_utils.init_html(base_dir, iteration)
    webpage_parent = viz_utils.init_html(base_dir, iteration, make_parent=True)
    webpages = [webpage, webpage_parent]
    # dump_gif_to_html writes the GIFs into both pages under the given title.
    webpage = viz_utils.dump_gif_to_html(gif_frame_seqs, iteration, webpages, "Predictions")
    # save webpage to subfolder for iteration and update global summary
    [wp.save() for wp in webpages]
def log_sess_output(sess_output,
                    monitor_index,
                    logger,
                    iteration,
                    dataset_name,
                    base_dir,
                    n_seqs=5,
                    phase="train",
                    build_seq_ims=True,
                    repeat=0,
                    is_hierarchical=False):
    """Logs the session output.

    Args:
        sess_output: A dictionary of evaluated values (not tensors) to log.
        monitor_index: The index of Tensor phase and types
        logger: A Logger object.
        iteration: The current training iteration.
        dataset_name: A string with the name of the dataset (currently unused
            in the body; kept for interface compatibility).
        base_dir: Base directory for saving visualization results.
        n_seqs: Number of sequences that should be logged at max. Defaults to 5.
        phase: The current phase, "train" or "val".
        build_seq_ims: If True, will be build sequence images from the ground truth
            and estimates returned by evaluation. Defaults to True.
        repeat: Unused; kept for interface compatibility.
        is_hierarchical: If True, compose segment-wise predictions into full
            sequences and log keyframe/attention/segment visualizations.
    Raises:
        ValueError if phase is not "train" or "val".
    """
    # Scalar / histogram / summary logging driven by the monitor index.
    for type_key, type_vals in monitor_index[phase].items():
        for type_ind in type_vals:
            if type_key in ["scalar", "metric", "loss"]:
                if type_ind in sess_output:
                    logger.log_scalar(
                        tag=type_ind,
                        value=sess_output[type_ind],
                        step=iteration)
            elif type_key == "hist":
                if type_ind in sess_output:
                    logger.log_histogram(
                        tag=type_ind,
                        values=sess_output[type_ind],
                        step=iteration)
            elif type_key == "sum":
                if type_ind in sess_output:
                    logger.log_summary(sess_output[type_ind], step=iteration)
    if build_seq_ims:
        if phase == "train":
            s = ""
        elif phase == "val":
            s = "_val"
        else:
            raise NotImplementedError("Visualization is currently only implemented for train and val!")
        # Idiom fix: membership test directly on the dict (was `.keys()`).
        if ("low_level_images" + s) not in sess_output:
            return
        # Build sequence image to save
        n_seqs = min(n_seqs, sess_output["input_images" + s].shape[1])
        # Draw action arrows if dataset is top
        if FLAGS.dataset_config_name == 'top':
            input_seq_len = sess_output["input_images" + s].shape[0]
            predict_seq_len = sess_output["predict_images" + s].shape[0]
            sess_output["input_images" + s] = viz_utils.draw_actions_on_images(sess_output["input_images" + s],
                                                sess_output["actions" + s][:input_seq_len])
            sess_output["predict_images" + s] = viz_utils.draw_actions_on_images(sess_output["predict_images" + s],
                                                sess_output["actions" + s][input_seq_len:input_seq_len + predict_seq_len])
            if 'regressed_actions' + s in sess_output:
                sess_output["low_level_images" + s] = viz_utils.draw_actions_on_images(sess_output["low_level_images" + s],
                                                    sess_output["regressed_actions" + s])
        if is_hierarchical:
            composed_seqs, keyframe_idxs = gen_composed_hierarchical_seqs(sess_output["low_level_images" + s],
                                                                          sess_output["dt" + s],
                                                                          n_seqs)
            # BUGFIX: was `phase is "train"` — identity comparison against a
            # string literal (implementation-dependent; SyntaxWarning on 3.8+).
            postfix = "" if phase == "train" else "_val"
            if "gt_target_low_level_images"+postfix in sess_output:
                composed_targets = sess_output["gt_target_low_level_images"+postfix]
                composed_targets = np.split(composed_targets, composed_targets.shape[1], axis=1)
                composed_targets = [ct[:, 0] for ct in composed_targets]
            else:
                composed_targets, _ = gen_composed_hierarchical_seqs(sess_output["low_level_image_targets" + s],
                                                                    sess_output["dt" + s],
                                                                    n_seqs)
        else:
            split_output_seqs = np.split(sess_output["low_level_images" + s],
                                         sess_output["low_level_images" + s].shape[1], axis=1)
            split_output_seqs = [so[:, 0] for so in split_output_seqs[:n_seqs]]
        if is_hierarchical:
            kfs = sess_output["high_level_images" + s]
            kf_idxs = keyframe_idxs
            gt_kf_idxs = sess_output["actions_abs" + s]\
                [-(sess_output["predict_images" + s].shape[0]+FLAGS.n_frames_segment):-FLAGS.n_frames_segment, :, 0]
        elif "kl_based_kfs"+s in sess_output:
            kfs = sess_output["kl_based_kfs" + s][:, :n_seqs]
            kf_idxs = [np.where(sess_output["kl_based_kfs_idxs" + s][:, i])[0] for i in range(n_seqs)]
            gt_kf_idxs = sess_output["actions_abs" + s]\
                [sess_output["input_images" + s].shape[0]:, :, 0]
        else:
            kfs = None
            kf_idxs = None
            gt_kf_idxs = None
        plot_imgs = gen_hierarchical_plot_imgs(sess_output["input_images" + s],
                                               sess_output["predict_images" + s],
                                               composed_seqs if is_hierarchical else split_output_seqs,
                                               composed_targets if is_hierarchical else None,
                                               kfs,
                                               kf_idxs,
                                               sess_output["attention_weights" + s] if is_hierarchical else None,
                                               gt_kf_idxs)
        logger.log_images(
            tag="image_predictions" + s,
            images=plot_imgs,
            step=iteration
        )
        # build kl overview for kl-based keyframes
        if "kl_based_kfs_kl" + s in sess_output:
            for suffix in ["", "_reencode"]:
                kl = sess_output["kl_based_kfs"+suffix+"_kl" + s]
                figs = []
                for idx in range(n_seqs):
                    figs.append(viz_utils.plot_single_kl_seq(kl[:, idx], gt_kf_idxs[:, idx]))
                logger.log_figures(
                    tag="kl_values" + suffix + s,
                    figures=figs,
                    step=iteration
                )
                viz_utils.close_figs(figs)
        # build segment overview image
        if is_hierarchical:
            overview_imgs = gen_segment_overviews(sess_output["low_level_images" + s], sess_output["dt" + s], n_seqs)
            logger.log_images(
                tag="segment_overview" + s,
                images=overview_imgs,
                step=iteration
            )
            if "low_level_image_targets" + s in sess_output:
                overview_targets = gen_segment_overviews(sess_output["low_level_image_targets" + s], sess_output["dt" + s], n_seqs)
                logger.log_images(
                    tag="segment_targets_overview" + s,
                    images=overview_targets,
                    step=iteration
                )
        # cut gt seq to output seq length
        gt_seq = np.concatenate((sess_output["input_images" + s], sess_output["predict_images" + s]), axis=0)
        if is_hierarchical:
            est_seqs = []
            for comp_seq, kf_idxs in zip(composed_seqs, keyframe_idxs):
                for kf_idx in kf_idxs:
                    comp_seq[kf_idx, :, :2, :] = 1.0  # mark keyframes with white bar at the top
                # Prepend blanks for the conditioning frames so GT and estimate align.
                dummy_seq = np.zeros(([sess_output["input_images" + s].shape[0]] + list(comp_seq.shape[-3:])))
                est_seqs.append(np.concatenate((dummy_seq, comp_seq), axis=0))
        else:
            est_seqs = split_output_seqs
        # log gifs to tensorboard and html
        gif_frame_seqs = viz_utils.create_concat_seqs(gt_seq,
                                                      est_seqs,
                                                      n_input_frames=sess_output["input_images" + s].shape[0] - 1)
        prep_gif_frame_seqs = viz_utils.prep_gif_seqs_tb(gif_frame_seqs)
        logger.log_gifs(
            tag="gif_image_predictions" + s,
            gif_images=prep_gif_frame_seqs,
            step=iteration
        )
        if phase == "val":
            if tf.flags.FLAGS.gen_html_summary:
                gen_html_summary(gif_frame_seqs, iteration, base_dir)
    return
if __name__ == "__main__":
    # NOTE(review): leftover debug scaffolding — loads a hard-coded personal
    # path and does nothing with the result. Also, newer numpy requires
    # allow_pickle=True for object arrays loaded via np.load — TODO confirm.
    sess_output = np.load("/home/karl/Downloads/sess_output.npy").item()
|
987,848 | 5512ea6def652865c440188f5b77c79c07d1241a | from sympy.ntheory.continued_fraction import continued_fraction_reduce
from sympy import fraction
from euler_helpers import digit_sum
def e_convergent_fraction(n):
    """Yield the first n terms of the continued-fraction expansion of e:
    [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...] — the even term grows by 2 each cycle.
    """
    def terms():
        yield 2
        even = 2
        while True:
            yield 1
            yield even
            yield 1
            even += 2
    source = terms()
    for _ in range(n):
        yield next(source)
def p065():
    """Project Euler 65: digit sum of the numerator of the 100th convergent
    of the continued fraction of e."""
    return digit_sum(fraction(continued_fraction_reduce(e_convergent_fraction(100)))[0])
def test_p065():
    """Known case from the problem statement: the 10th convergent's numerator
    (1457) has digit sum 17 — checked against both a literal term list and
    the generator."""
    assert digit_sum(fraction(continued_fraction_reduce([2, 1, 2, 1, 1, 4, 1, 1, 6, 1]))[0]) == 17
    assert digit_sum(fraction(continued_fraction_reduce(e_convergent_fraction(10)))[0]) == 17
if __name__ == '__main__':
    # Print the Project Euler 65 answer when run as a script.
    print(p065())
|
987,849 | f7e7783bd788c044b4faeb28ea56a032a0e5ca31 | import sys
sys.path.append("..")
sys.path.append("../..")
import numpy as np
import os
import argparse
from sklearn.metrics import accuracy_score
import tensorflow as tf
from data.kenyan_water_dataset import KenyanWaterDataset, get_subportion_confounders
from data.kenyan_water_dataset import median_child_paper, median_child
from models_kenyan import Twin_Net_Kenyan, Twin_Net_Kenyan_with_Z_A
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import f1_score
import copy
def get_test_confs(dataset, args, treatment_factual=None, mode='test'):
    """Assemble the confounder inputs fed to the twin network.

    Modes:
      - 'test': use the held-out test split's confounder columns.
      - 'paper_median' / 'dataset_median': tile the per-confounder median
        values (from the paper, or computed from the dataset) to the length
        of `treatment_factual`.

    Returns a list of per-confounder float arrays when
    args.multiple_confounders is set, otherwise a single stacked array.
    NOTE(review): the median modes mutate args.confounders in place (assigning
    a dict_keys view) — confirm callers don't rely on the original value.
    """
    if mode == 'test':
        if args.multiple_confounders:
            # One input array per confounder column.
            conf_to_input = [dataset.test[i].values.astype(np.float32) for i in dataset.test.columns]
        else:
            conf_to_input = [dataset.test.values.astype(np.float32)]
    elif mode == 'paper_median':
        if 'all' in args.confounders:
            args.confounders = median_child_paper.keys()
        conf_to_input = [np.tile(median_child_paper[i], (len(treatment_factual))) for i in args.confounders]
        if not args.multiple_confounders:
            # Stack to (n_samples, n_confounders) for the single-input case.
            conf_to_input = np.array(conf_to_input).T
    elif mode == 'dataset_median':
        if 'all' in args.confounders:
            args.confounders = median_child.keys()
        conf_to_input = [np.tile(median_child[i], (len(treatment_factual))) for i in args.confounders]
        if not args.multiple_confounders:
            conf_to_input = np.array(conf_to_input).T
    return conf_to_input
def prob_nec(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input, outcomes_factual,
             outcomes_counter, args):
    """Estimate P(y' == outcomes_counter | y == outcomes_factual) from the
    twin network's factual/counterfactual predictions — used here as a
    probability-of-necessity estimate.

    Predictions are min-max scaled and thresholded at 0.5 into binary labels.
    NOTE(review): each head is scaled with a *separately refit* MinMaxScaler,
    so the two 0.5 thresholds are not on a shared scale — confirm intended.
    """
    preds = model.predict([treatment_factual, treatment_counter, uy_to_input,
                           conf_to_input],
                          args.batch_size, 1)
    pred_factual = preds[0]
    pred_counter = preds[1]
    scaler = MinMaxScaler()
    y = scaler.fit_transform(pred_factual)
    y_prime = scaler.fit_transform(pred_counter)
    # Binarize at 0.5 after scaling.
    y[y >= 0.5] = 1
    y[y < 0.5] = 0
    y_prime[y_prime >= 0.5] = 1
    y_prime[y_prime < 0.5] = 0
    # Condition on factual outcome; count counterfactual flips among them.
    idx_given_y_1 = np.where(y == outcomes_factual)[0]
    idx_query_y_0 = np.where(y_prime == outcomes_counter)[0]
    idx_y_1_y_prime_0 = set(idx_given_y_1).intersection(idx_query_y_0)
    prob_necessity_1 = len(idx_y_1_y_prime_0) / len(idx_given_y_1)
    return prob_necessity_1
def prob_suf(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input, outcomes_factual,
             outcomes_counter, args):
    """Estimate P(y' == outcomes_counter | y == outcomes_factual) — same
    conditional ratio as prob_nec, used here as a probability-of-sufficiency
    estimate (the caller controls which outcome values are conditioned on).

    NOTE(review): body duplicates prob_nec except for variable names; the same
    separately-refit MinMaxScaler caveat applies.
    """
    preds = model.predict([treatment_factual, treatment_counter, uy_to_input,
                           conf_to_input],
                          args.batch_size, 1)
    pred_factual = preds[0]
    pred_counter = preds[1]
    scaler = MinMaxScaler()
    y = scaler.fit_transform(pred_factual)
    y_prime = scaler.fit_transform(pred_counter)
    # Binarize at 0.5 after scaling.
    y[y >= 0.5] = 1
    y[y < 0.5] = 0
    y_prime[y_prime >= 0.5] = 1
    y_prime[y_prime < 0.5] = 0
    idx_given_y_0 = np.where(y == outcomes_factual)[0]
    idx_query_y_1 = np.where(y_prime == outcomes_counter)[0]
    idx_y_0_y_prime_1 = set(idx_given_y_0).intersection(idx_query_y_1)
    prob_suficiency = len(idx_y_0_y_prime_1) / len(idx_given_y_0)
    return prob_suficiency
def prob_nec_and_suf(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input, outcomes_factual,
                     outcomes_counter, args):
    """Estimate the joint fraction P(y == outcomes_factual, y' ==
    outcomes_counter) over all samples — probability of necessity *and*
    sufficiency. Unlike prob_nec/prob_suf this normalizes by the full batch
    size, not the conditioned subset.

    NOTE(review): same separately-refit MinMaxScaler caveat as prob_nec.
    """
    preds = model.predict([treatment_factual, treatment_counter, uy_to_input,
                           conf_to_input],
                          args.batch_size, 1)
    pred_factual = preds[0]
    pred_counter = preds[1]
    scaler = MinMaxScaler()
    y = scaler.fit_transform(pred_factual)
    y_prime = scaler.fit_transform(pred_counter)
    # Binarize at 0.5 after scaling.
    y[y >= 0.5] = 1
    y[y < 0.5] = 0
    y_prime[y_prime >= 0.5] = 1
    y_prime[y_prime < 0.5] = 0
    # idx_given_y_0 = np.sum(y == outcomes_factual) / len(y)
    # idx_query_y_1 = np.sum(y_prime == outcomes_counter)/ len(y_prime)
    idx_given_y_0 = np.where(y == outcomes_factual)[0]
    idx_query_y_1 = np.where(y_prime == outcomes_counter)[0]
    idx_y_0_y_prime_1 = set(idx_given_y_0).intersection(idx_query_y_1)
    prob_nec_and_suficiency = len(idx_y_0_y_prime_1)/len(y)
    # prob_nec_and_suficiency = np.abs(idx_given_y_0 - idx_query_y_1)
    return prob_nec_and_suficiency
def run_inference(args):
    """Load the trained twin network named by ``args.inference_name``,
    evaluate it on the test split (loss, accuracy, F1 for factual and
    counterfactual heads), then delegate to calc_probs for the
    probability-of-causation estimates.

    Returns calc_probs' (necessity, sufficiency, both) triples when
    args.train is False, otherwise None.
    """
    dataset = KenyanWaterDataset(**vars(args))
    # Pop target/treatment columns (and their counterfactual twins) so that
    # only confounder columns remain in the train/test frames.
    target = dataset.train.pop('targets')
    target_prime = dataset.train.pop('targets_prime')
    treatment = dataset.train.pop('treatment')
    treatment_prime = dataset.train.pop('treatment_prime')
    uy = dataset.train.pop('uy')
    target_test = dataset.test.pop('targets')
    target_prime_test = dataset.test.pop('targets_prime')
    treatment_test = dataset.test.pop('treatment')
    treatment_prime_test = dataset.test.pop('treatment_prime')
    uy_test = dataset.test.pop('uy')
    # Get confounders
    dataset.train = get_subportion_confounders(dataset.train, args.confounders)
    dataset.test = get_subportion_confounders(dataset.test, args.confounders)
    args.len_conf = len(dataset.train.columns)
    if args.multiple_confounders:
        # One calibrator (monotonicity + lattice size) per confounder column.
        args.z_monotonicity = []
        for i, col in enumerate(dataset.train.columns):
            args.z_monotonicity.append(args.z_monotonicity_base[col])
            args.lattice_sizes.append(args.z_calib_units)
        input_len = args.len_conf + 2
    else:
        input_len = 3
        args.z_monotonicity = [args.z_monotonicity]
        args.lattice_sizes.append(args.len_conf)
    # 'az' experiments use the architecture with a direct Z->A link
    # (unless the layer type is linear).
    if 'az' in args.runPath and args.layer != 'linear':
        model = Twin_Net_Kenyan_with_Z_A(treatment, uy, dataset.train, args)
    else:
        model = Twin_Net_Kenyan(treatment, uy, dataset.train, args)
    # Set up loss
    if 'mse' in args.loss:
        loss_func = tf.keras.losses.mean_squared_error
    elif 'mae' in args.loss:
        loss_func = tf.keras.losses.mean_absolute_error
    elif 'bce' in args.loss:
        loss_func = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    model.compile(
        loss=loss_func)
    print('-------------------------Experiment: {} ---------------------'.format(args.inference_name))
    model.build((1, input_len))
    model.load_weights(args.runPath + '/best')
    conf_to_input = [dataset.test.values.astype(np.float32)]
    if args.multiple_confounders:
        # One input tensor per confounder column.
        conf_to_input = [dataset.test[i].values.astype(np.float32) for i in dataset.test.columns]
    test_loss = model.evaluate(
        [treatment_test.values.astype(np.float32), treatment_prime_test.values.astype(np.float32),
         uy_test.values.astype(np.float32),
         conf_to_input],
        [target_test[..., np.newaxis], target_prime_test[..., np.newaxis]])
    print('Test Loss : {}'.format(test_loss))
    preds = model.predict([treatment_test.values.astype(np.float32), treatment_prime_test.values.astype(np.float32),
                           uy_test.values.astype(np.float32), conf_to_input],
                          args.batch_size, 1)
    title = ['Factual', 'Counterfactual']
    preds = preds[0:2]
    for i, pred in enumerate(preds):
        scaler = MinMaxScaler()
        pred = scaler.fit_transform(pred)
        # Fix: use >= so values exactly at 0.5 are binarised as 1, matching the
        # thresholding used in prob_nec/prob_suf (previously 0.5 stayed as its
        # raw scaled value and could never equal the 0/1 labels).
        pred[pred >= 0.5] = 1
        pred[pred < 0.5] = 0
        # NOTE(review): both heads are scored against target_test; the
        # counterfactual head arguably should be scored against
        # target_prime_test -- confirm intent before changing.
        ac = accuracy_score(pred, target_test)
        print('{} Acc: {}'.format(title[i], ac))
        f1 = f1_score(target_test, pred)
        print('{} F1 : {}'.format(title[i], f1))
    if not args.train:
        prob_necessities, prob_sufficiencies, prob_both = calc_probs(args, treatment_test, dataset, model)
        return prob_necessities, prob_sufficiencies, prob_both
    else:
        calc_probs(args, treatment_test, dataset, model)
def calc_probs(args, treatment_test, dataset, model):
    """Compute and print probabilities of necessity, sufficiency and
    necessity-and-sufficiency, each under three confounder settings
    ('test', 'paper_median', 'dataset_median').

    Returns the three result triples when args.train is False, else None.
    """
    # ##################### Prob of Necessity #####################
    # Factual: treated with outcome 1; counterfactual query: untreated, outcome 0.
    N = len(treatment_test)
    # N = 10000
    uy_samples = dataset.get_uy_samples(N)
    treatment_factual = np.ones(N)
    treatment_counter = np.zeros(N)
    outcomes_factual = 1
    outcomes_counter = 0
    uy_to_input = uy_samples
    conf_to_input = get_test_confs(dataset, args, mode='test')
    prob_necessity_1 = prob_nec(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input,
                                outcomes_factual, outcomes_counter, args)
    print('Test Probability of Necessity : {}'.format(prob_necessity_1))
    conf_to_input = get_test_confs(dataset, args, treatment_factual=treatment_factual, mode='paper_median')
    prob_necessity_2 = prob_nec(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input,
                                outcomes_factual, outcomes_counter, args)
    print('Median Child Paper Test Probability of Necessity : {}'.format(prob_necessity_2))
    conf_to_input = get_test_confs(dataset, args, treatment_factual=treatment_factual, mode='dataset_median')
    prob_necessity_3 = prob_nec(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input,
                                outcomes_factual, outcomes_counter, args)
    print('Median Child Dataset Test Probability of Necessity : {}'.format(prob_necessity_3))
    # ##################### Prob of Sufficiency #####################
    # Factual: untreated with outcome 0; counterfactual query: treated, outcome 1.
    N = len(treatment_test)
    # N = 10000
    # uy_samples = dataset.get_uy_samples(N)
    # uy_to_input = uy_samples
    treatment_factual = np.zeros(N)
    treatment_counter = np.ones(N)
    outcomes_factual = 0
    outcomes_counter = 1
    conf_to_input = get_test_confs(dataset, args, mode='test')
    prob_suf_1 = prob_suf(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input, outcomes_factual,
                          outcomes_counter, args)
    print('Test Probability of Sufficiency : {}'.format(prob_suf_1))
    # N = 10000
    # uy_samples = dataset.get_uy_samples(N)
    # uy_to_input = uy_samples
    treatment_factual = np.zeros(N)
    treatment_counter = np.ones(N)
    conf_to_input = get_test_confs(dataset, args, treatment_factual=treatment_factual, mode='paper_median')
    prob_suf_2 = prob_suf(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input, outcomes_factual,
                          outcomes_counter, args)
    print('Median Child Paper Test Probability of Sufficiency : {}'.format(prob_suf_2))
    conf_to_input = get_test_confs(dataset, args, treatment_factual=treatment_factual, mode='dataset_median')
    prob_suf_3 = prob_suf(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input, outcomes_factual,
                          outcomes_counter, args)
    print('Median Child Dataset Test Probability of Sufficiency : {}'.format(prob_suf_3))
    # ############ PROB of Nec and Suf ##################
    N = len(treatment_test)
    # N = 10000
    # uy_samples = dataset.get_uy_samples(N)
    # uy_to_input = uy_samples
    treatment_factual = np.zeros(N)
    treatment_counter = np.ones(N)
    outcomes_factual = 0
    outcomes_counter = 1
    conf_to_input = get_test_confs(dataset, args, mode='test')
    prob_nec_suf_1 = prob_nec_and_suf(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input, outcomes_factual,
                                      outcomes_counter, args)
    print('Test Probability of Necessity and Sufficiency : {}'.format(prob_nec_suf_1))
    # NOTE(review): sample size switches to a fixed 10000 (with fresh uy
    # samples) for the median-based PNS estimates only -- confirm intentional.
    N = 10000
    uy_samples = dataset.get_uy_samples(N)
    uy_to_input = uy_samples
    treatment_factual = np.zeros(N)
    treatment_counter = np.ones(N)
    conf_to_input = get_test_confs(dataset, args, treatment_factual=treatment_factual, mode='paper_median')
    prob_nec_suf_2 = prob_nec_and_suf(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input, outcomes_factual,
                                      outcomes_counter, args)
    print('Median Child Paper Test Probability of Necessity and Sufficiency : {}'.format(prob_nec_suf_2))
    conf_to_input = get_test_confs(dataset, args, treatment_factual=treatment_factual, mode='dataset_median')
    prob_nec_suf_3 = prob_nec_and_suf(model, treatment_factual, treatment_counter, uy_to_input, conf_to_input, outcomes_factual,
                                      outcomes_counter, args)
    print('Median Child Dataset Test Probability of Necessity and Sufficiency : {}'.format(prob_nec_suf_3))
    if not args.train:
        return (prob_necessity_1, prob_necessity_2, prob_necessity_3), (prob_suf_1, prob_suf_2, prob_suf_3), (prob_nec_suf_1, prob_nec_suf_2, prob_nec_suf_3)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # train=False runs repeated inference below and aggregates statistics.
    # NOTE(review): argparse type=bool is truthy for any non-empty string
    # (e.g. "--train False" yields True) -- only the default is reliable.
    parser.add_argument('--train', type=bool, default=False)
    parser.add_argument('--inference_name',
                        default='twin_net_arch_lattice_azlink_none_uy_none_uy_monotonicity_none_z_monoton_opt_2_z_layer_multiple_none_calib_units_4_4_z_4_lr_0_001_loss_mse_weighted_kenyan_confounders_3_confs')
    parser.add_argument('--prob_type', default='paper')
    # Logging
    parser.add_argument('--restore', type=bool, default=False)
    parser.add_argument('--log_root', type=str, default='./experiments/KenyanWater/Twin')
    # Dataset Hparams
    parser.add_argument('--save_path', default='./data/Datasets/')
    parser.add_argument('--save_name', default='kenyan_water_proc.pkl')
    parser.add_argument('--save_dataset', default=False)
    parser.add_argument('--load_dataset', default=True)
    parser.add_argument('--dataset_mode', type=str, default='synthetic')
    parser.add_argument('--path_to_data',
                        default='../data/Datasets/kenyan_water_proc_single_uy_normal_with_propensity.pkl')
    # default='./data/Datasets/synthetic_dataset_200000_samples_X_bernouli_Uy_normal_with_counterfactual.pkl')
    # parser.add_argument('--confounders', default=['base_age','splnecmpn_base','latrine_density_base','numkids_base'])
    # parser.add_argument('--confounders', default=['base_age','splnecmpn_base','e1_iron_roof_base','latrine_density_base','numkids_base'])
    # parser.add_argument('--confounders', default='all')
    parser.add_argument('--confounders', default=['base_age', 'splnecmpn_base', 'latrine_density_base'])
    # parser.add_argument('--confounders', default=['splnecmpn_base','latrine_density_base'])
    parser.add_argument('--u_distribution', default='normal')
    # Model Hparams
    parser.add_argument('--lattice_sizes', default=[4, 4])
    parser.add_argument('--lattice_monotonicities', default=['increasing', 'increasing'])
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--epochs', type=int, default=200)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--lattice_units', type=int, default=1)  # 1 or 2
    parser.add_argument('--hidden_dims', type=int, default=4)
    parser.add_argument('--calib_units', type=int, default=4)
    parser.add_argument('--z_calib_units', type=int, default=4)
    parser.add_argument('--layer', default='lattice')
    parser.add_argument('--uy_layer', default='none')
    parser.add_argument('--z_layer', default='none')
    parser.add_argument('--uy_monotonicity', default='none')
    parser.add_argument('--z_monotonicity', default='none')
    parser.add_argument('--z_monot_opt', default=2)
    parser.add_argument('--concats', type=bool, default=False)
    parser.add_argument('--end_activation', default='none')
    parser.add_argument('--loss', default='mse')
    parser.add_argument('--multiple_confounders', default=True, help='split confounders')
    # General
    # NOTE(review): help text says "(default: 1)" but the default is 42.
    parser.add_argument('--seed', type=int, default=42, metavar='S', help='random seed (default: 1)')
    parser.add_argument('--gpu', type=str, default='0')
    parser.add_argument('--workers', type=int, default=0)
    args = parser.parse_args()
    # GPU setup
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # Set Randomness (seed == 0 means "draw a random seed").
    if args.seed == 0: args.seed = int(np.random.randint(0, 2 ** 32 - 1, (1,)))
    print('seed', args.seed)
    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)
    # Set logdirs
    args.path_to_data = args.load_path = args.path_to_data.format(args.u_distribution)
    args.runPath = os.path.join(args.log_root, args.inference_name)
    if args.multiple_confounders:
        # args.z_calib_units = 2
        # args.z_calib_units = args.lattice_sizes[0]
        z_monotonicity = 'opt_{}'.format(args.z_monot_opt)
        # Resolves a module-level dict named confounder_monotonicities_<opt>.
        args.z_monotonicity_base = eval('confounder_monotonicities_{}'.format(args.z_monot_opt))
    else:
        z_monotonicity = args.z_monotonicity
        args.z_calib_units = len(args.confounders) if 'all' not in args.confounders else 8
    if not args.train:
        # Repeat inference 19 times and report mean/std of every estimate.
        nec_test_prob = []
        nec_median_paper_prob = []
        nec_dataset_prob = []
        suf_test_prob = []
        suf_median_paper_prob = []
        suf_dataset_prob = []
        nec_and_suf_test_prob = []
        nec_and_suf_median_paper_prob = []
        nec_and_suf_dataset_prob = []
        for i in range(1, 20):
            # deepcopy: run_inference mutates args (lattice_sizes, z_monotonicity).
            prob_necessities, prob_sufficiencies, prob_both = run_inference(copy.deepcopy(args))
            nec_test_prob.append(prob_necessities[0])
            nec_median_paper_prob.append(prob_necessities[1])
            nec_dataset_prob.append(prob_necessities[2])
            suf_test_prob.append(prob_sufficiencies[0])
            suf_median_paper_prob.append(prob_sufficiencies[1])
            suf_dataset_prob.append(prob_sufficiencies[2])
            nec_and_suf_test_prob.append(prob_both[0])
            nec_and_suf_median_paper_prob.append(prob_both[1])
            nec_and_suf_dataset_prob.append(prob_both[2])
        print('\n \nTest average Prob of Necessity {}, std: {}'.format(np.array(nec_test_prob).mean(), np.array(nec_test_prob).std()))
        print('Paper Median average Prob of Necessity {}, std: {}'.format(np.array(nec_median_paper_prob).mean(),
                                                                          np.array(nec_median_paper_prob).std()))
        print('Dataset median average Prob of Necessity {}, std: {}'.format(np.array(nec_dataset_prob).mean(),
                                                                            np.array(nec_dataset_prob).std()))
        print('Test average Prob of Sufficiency {}, std: {}'.format(np.array(suf_test_prob).mean(),
                                                                    np.array(suf_test_prob).std()))
        print('Paper Median average Prob of Sufficiency {}, std: {}'.format(np.array(suf_median_paper_prob).mean(),
                                                                            np.array(suf_median_paper_prob).std()))
        print('Dataset median average Prob of Sufficiency {}, std: {}'.format(np.array(suf_dataset_prob).mean(),
                                                                              np.array(suf_dataset_prob).std()))
        print('Test average Prob of Necessity and Sufficiency {}, std: {}'.format(np.array(nec_and_suf_test_prob).mean(),
                                                                                  np.array(nec_and_suf_test_prob).std()))
        print('Paper Median average Prob of Necessity and Sufficiency {}, std: {}'.format(np.array(nec_and_suf_median_paper_prob).mean(),
                                                                                          np.array(nec_and_suf_median_paper_prob).std()))
        print('Dataset median average Prob of Necessity and Sufficiency {}, std: {} \n \n'.format(np.array(nec_and_suf_dataset_prob).mean(),
                                                                                                  np.array(nec_and_suf_dataset_prob).std()))
    else:
        run_inference(args)
    # raise NotImplementedError
|
class Employee:
    """An employee with an id, a name and a monthly salary."""

    def __init__(self, id, firstName, lastName, salary):
        self.id = id
        self.firstName = firstName
        self.lastName = lastName
        self.salary = salary  # monthly salary

    def getID(self):
        return self.id

    def getFirstName(self):
        return self.firstName

    def getLastName(self):
        return self.lastName

    def getName(self):
        # Returns a (firstName, lastName) tuple.
        return self.firstName, self.lastName

    def getSalary(self):
        return self.salary

    def getAnnualSalary(self):
        return self.salary * 12

    def raiseSalary(self, percent):
        # NOTE(review): multiplies the salary by *percent* directly (e.g.
        # raiseSalary(1.1) for +10%); despite the name it is not additive.
        self.salary = self.salary * percent
        return self.salary

    def toString(self):
        # Bug fix: the original concatenated int fields (id, salary) to str
        # with '+', which raises TypeError. str.format handles any type.
        return "Employee[id={},name={} {}, salary ={}]".format(
            self.id, self.firstName, self.lastName, self.salary)
class InvoiceItem:
    """A line item on an invoice: id, description, quantity and unit price."""

    def __init__(self, id, desc, qty, unitPrice):
        self.id = id
        self.desc = desc
        self.qty = qty
        self.unitPrice = unitPrice

    def getID(self):
        return self.id

    def getDesc(self):
        return self.desc

    def getQty(self):
        return self.qty

    def setQty(self, val):
        self.qty = val

    def getUnitPrice(self):
        return self.unitPrice

    def setUnitPrice(self, val):
        self.unitPrice = val

    def getTotal(self):
        # Line total = unit price x quantity.
        return self.unitPrice * self.qty

    def toString(self):
        # Bug fix: the original concatenated numeric fields (id, qty,
        # unitPrice) to str with '+', which raises TypeError.
        return "InvoiceItem[id={},desc={}, qty {}, unitPrice ={}]".format(
            self.id, self.desc, self.qty, self.unitPrice)
class Account:
    """A bank account with credit, debit and transfer operations.

    Debits and transfers that exceed the balance are refused with a printed
    warning and leave the balance unchanged.
    """

    def __init__(self, id, name, balance=0):
        self.id = id
        self.name = name
        self.balance = balance

    def getID(self):
        return self.id

    def getName(self):
        return self.name

    def getBalance(self):
        return self.balance

    def credit(self, amount):
        """Add *amount* to the balance and return the new balance."""
        self.balance = self.balance + amount
        return self.balance

    def debit(self, amount):
        """Subtract *amount* if covered by the balance; return the balance."""
        if amount <= self.balance:
            self.balance = self.balance - amount
        else:
            print("Amount exceeded balance")
        return self.balance

    def transferTo(self, Account, amount):
        """Move *amount* to another account if covered; return own balance.

        NOTE: the parameter is named 'Account', shadowing the class inside
        this method; kept for interface compatibility.
        """
        if amount <= self.balance:
            self.balance -= amount
            Account.balance += amount
        else:
            print("Amount exceeded balance")
        return self.balance

    def toString(self):
        # Bug fix: the original concatenated numeric fields (id, balance)
        # to str with '+', which raises TypeError.
        return "Account[id={},name={},balance={}]".format(
            self.id, self.name, self.balance)
class Date:
    """A calendar date held as plain day/month/year integers (no validation)."""

    def __init__(self, day, month, year):
        self.day = day
        self.month = month
        self.year = year

    def getDay(self):
        return self.day

    def getMonth(self):
        return self.month

    def getYear(self):
        return self.year

    def setDay(self, day):
        self.day = day

    def setMonth(self, month):
        self.month = month

    def setYear(self, year):
        self.year = year

    def setDate(self, day, month, year):
        self.day = day
        self.month = month
        self.year = year

    def toString(self):
        # Bug fix: the original built '"" + self.day + "/" + ...', which
        # raises TypeError for integer fields; format the "d/m/y" string.
        return "{}/{}/{}".format(self.day, self.month, self.year)
class Time:
    """A time of day held as plain hour/minute/second integers."""

    def __init__(self, hour, minute, second):
        self.hour = hour
        self.minute = minute
        self.second = second

    def getHour(self):
        return self.hour

    def getMinute(self):
        return self.minute

    def getSecond(self):
        return self.second

    def setHour(self, hour):
        self.hour = hour

    def setMinute(self, minute):
        self.minute = minute

    def setSecond(self, second):
        self.second = second

    def setTime(self, hour, minute, second):
        self.hour = hour
        self.minute = minute
        self.second = second

    def toString(self):
        # Bug fix: the original built '"" + self.hour + ":" + ...', which
        # raises TypeError for integer fields; format the "h:m:s" string.
        return "{}:{}:{}".format(self.hour, self.minute, self.second)

    def nextSecond(self):
        # NOTE(review): does not wrap at 60 or carry into minutes -- the
        # second simply increments; preserved as-is.
        self.second += 1
        return self

    def previousSecond(self):
        # NOTE(review): does not wrap below 0; preserved as-is.
        self.second -= 1
        return self
|
987,851 | eccd41c56c6de43897a7ebcebbf2f2a15cc33f21 | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 28 18:30:48 2020
@author: Cati
"""
import numpy
from random import randint, random
from sys import maxsize
from copy import deepcopy
class Population :
    """Population of candidate Graeco-Latin squares for a genetic algorithm.

    Each individual is a pair [S, T] of n x n squares whose rows are random
    permutations of 1..n; fitness counts column violations and duplicated
    (S, T) value pairs (0 errors = a valid Graeco-Latin square).
    """
    def __init__(self, n, popSize, prM, prC):
        self.squareSize = n      # side length n of each square
        self.popSize = popSize   # number of individuals
        self.prM = prM           # mutation probability
        self.prC = prC           # crossover probability
        self.population = self.getPopulation()
    def getPopulation(self):
        """Build popSize random individuals, each a pair of random squares."""
        population = []
        for x in range(self.popSize):
            indS = self.getIndividual()
            indT = self.getIndividual()
            population.append([indS, indT])
        return population
    def getIndividual(self):
        """Return an n x n square whose every row is a permutation of 1..n."""
        el = (numpy.random.permutation(self.squareSize)+1 for x in range(self.squareSize))
        listP = []
        for i in el:
            # Convert each numpy permutation into a plain list of ints.
            a = list(j for j in i)
            listP.append(a)
        return listP
    def getPopSize(self):
        return self.popSize
    # Crossover: with probability prC, splice rows [0..t1] and (t2..n) from
    # parent1 with rows (t1..t2] from parent2; otherwise the child stays
    # [[], []], which mutate/fitness treat as "no child".
    def crossover(self, parent1, parent2):
        cross = random()
        indS = []
        indT = []
        if self.prC > cross:
            t1 = randint(0,self.squareSize-1)
            t2 = randint(0,self.squareSize-1)
            while t2 < t1:
                t2 = randint(0,self.squareSize-1)
            for i in range(t1+1):
                indS.append(parent1[0][i])
                indT.append(parent1[1][i])
            for i in range(t1+1, t2+1):
                indS.append(parent2[0][i])
                indT.append(parent2[1][i])
            for i in range(t2+1, self.squareSize):
                indS.append(parent1[0][i])
                indT.append(parent1[1][i])
        kid = [indS, indT]
        return kid
    # Mutation: with probability prM, replace one row of each square with a
    # fresh random permutation (skipped for the empty [[], []] sentinel).
    def mutate(self, chr):
        mutate = random()
        if self.prM > mutate and chr != [[],[]]:
            t = randint(0,self.squareSize-1)
            pS = list(numpy.random.permutation(self.squareSize)+1)
            pT = list(numpy.random.permutation(self.squareSize)+1)
            chr[0][t] = pS
            chr[1][t] = pT
        return chr
    # Fitness: counts column entries that break the 1..n permutation property
    # plus duplicated (S, T) cell-value pairs; lower is better.
    def fitness(self, chr):
        if chr == [[],[]]:
            # Empty sentinel from a skipped crossover: worst possible score.
            return maxsize
        errors = 0
        indS = chr[0]
        indT = chr[1]
        for i in range(self.squareSize):
            # Collect column i of both squares.
            lstS = []
            lstT = []
            for j in range(self.squareSize):
                lstS.append(indS[j][i])
                lstT.append(indT[j][i])
            lstS.sort()
            lstT.sort()
            # A sorted permutation of 1..n must read 1, 2, ..., n.
            # NOTE(review): this loop reuses the outer variable name i; the
            # outer for restores it from its iterator, so behaviour is fine,
            # but renaming the inner index would be clearer.
            for i in range(self.squareSize):
                if lstS[i] != i+1:
                    errors = errors + 1
                if lstT[i] != i+1:
                    errors = errors + 1
        # Count duplicated (S, T) value pairs across all distinct cell pairs
        # (O(n^4); every duplicate is counted twice, once per ordering).
        for i in range(self.squareSize):
            for j in range(self.squareSize):
                for k in range(self.squareSize):
                    for l in range(self.squareSize):
                        if i != k or j != l:
                            if indS[i][j] == indS[k][l] and indT[i][j] == indT[k][l]:
                                errors = errors + 1
        return errors
    def getPopulationn(self):
        return self.population
    def getSquare(self, chr):
        """Render an individual as rows of "(s, t)" cell pairs, one line per row."""
        indS = chr[0]
        indT = chr[1]
        s = ""
        for i in range(self.squareSize):
            for j in range(self.squareSize):
                s += "("+str(indS[i][j])+", "+str(indT[i][j])+")"+" "
            s+="\n"
        return s
    def iteration(self):
        """One GA step: breed a child from two random distinct parents and let
        it replace the strictly worse parent. Returns the population."""
        kids = []
        ind1 = randint(0, self.popSize-1)
        ind2 = randint(0, self.popSize-1)
        if ind1 != ind2:
            parent1 = self.population[ind1]
            parent2 = self.population[ind2]
            chr = self.crossover(parent1, parent2)
            chr = self.mutate(chr)
            f1 = self.fitness(parent1)
            f2 = self.fitness(parent2)
            fc = self.fitness(chr)
            # Replace whichever parent is strictly worse than both others.
            if f1>f2 and f1>fc:
                self.population[ind1]=chr
            if f2>f1 and f2>fc:
                self.population[ind2]=chr
        return self.population
|
987,852 | 2b2620497a66b3d24dea53337aee94b422ee5cc5 | #!/usr/bin/env python
###
#
# This script creates a 3D vector map, 2D vector maps for each height
# using data from the micro-PIV experiments. It averages over each height
# and combines this into a single graph. It can also write the averaged
# velocities to a new file.
#
# Author: Callum Kift
#
# Directory setup:
# main_dir > different_height_directories + height_file.txt> txt files for given height
#
###
import os
import collections
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D # Needed for 3D plot, 'projection=3d'
def sqr(a):
    """Return *a* squared.

    :param a: integer or float
    :return: a to the power of two
    """
    return a ** 2
def read_hf():
    """
    Reads the height file (one float per line) from the module-level main_dir.
    :return: numpy array of experiment heights, or None on a malformed line
    """
    hf = main_dir + "height_file.txt"
    height_list = []
    with open(hf, 'r') as f:
        for line in f:
            line = line.strip()
            column = line.split()
            # Each line must contain exactly one value (the height).
            if len(column) == 1:
                height_list.append(float(column[0]))
            else:
                # Python 2 print statement; aborts and implicitly returns None.
                print "Error: height file has wrong format!"
                return
    return np.array(height_list)
def get_subdirectories(a_dir):
    """
    Retrieves the immediate sub-directories of the given directory.
    :param a_dir: path to a directory (expected to end with "/")
    :return: list of "<a_dir><name>/" paths, in os.listdir order
    """
    subdirs = []
    for entry in os.listdir(a_dir):
        if os.path.isdir(os.path.join(a_dir, entry)):
            subdirs.append(a_dir + entry + "/")
    return subdirs
def get_files(a_dir):
    """
    Creates a list of all txt files in the given directory.
    :param a_dir: Path to a directory
    :return: list of "<a_dir>/<file>" paths, or None (with a printed error)
             when the directory contains no txt files
    """
    gf = []
    for file in os.listdir(a_dir):
        if file.endswith(".txt"):
            gf.append(a_dir + "/" + str(file))
    if len(gf) != 0:
        return gf
    else:
        # Python 2 print statement; falls through returning None.
        print "Error: Cannot find TXT files in subdirectory!\n\t (%s)" % a_dir
def get_data(eh, file_list):
    """
    Reads the data in the list of files for one height, averages the
    velocities across files and optionally reduces them to subgrid values.
    :param eh: Experimental height
    :param file_list: List of txt files corresponding to the given height
    :return: (xmid, ymid, zipped position/velocity records) normally, or the
             sub_grid array when the module-level make_sg flag is set.
             NOTE(review): the make_sg branch returns a single array while the
             caller unpacks three values -- verify before enabling make_sg.
    """
    x_pos = []
    y_pos = []
    x_vel = []
    y_vel = []
    z_vel = []
    unique_x = []
    unique_y = []
    # reading data: each line is "x y vx vy"; positions are only taken from
    # the first file since they are identical across files.
    for file in file_list:
        with open(file, 'r') as f:
            f.readline()  # Ignores first (header) line
            for line in f:
                line = line.strip()
                column = line.split()
                if len(column) == 4:
                    if file == file_list[0]:
                        # Only takes position data from first file as the same in each file
                        x_pos.append(float(column[0]))
                        y_pos.append(float(column[1]))
                        x_vel.append(float(column[2]))
                        y_vel.append(float(column[3]))
                        z_vel.append(0.0)  # no z-velocity measured
                        if float(column[0]) not in unique_x:
                            unique_x.append(float(column[0]))
                        if float(column[1]) not in unique_y:
                            unique_y.append(float(column[1]))
                    else:
                        x_vel.append(float(column[2]))
                        y_vel.append(float(column[3]))
                        z_vel.append(0.0)
                else:
                    print "Error: TXT file is not correct!"
    ux = len(unique_x)
    uy = len(unique_y)
    # xmid and ymid are used to get xz- and yz-planes. The median value is used. If the number of
    # unique xs and ys is even, then the median value will be one that does not correspond to a
    # measurement. When this is the case, the first value is ignored so that the number of uniques is
    # odd, resulting in a median value that corresponds to a measurement.
    if ux % 2 == 0:
        xmid = np.median(unique_x[1:])
    else:
        xmid = np.median(unique_x)
    if uy % 2 == 0:
        ymid = np.median(unique_y[1:])
    else:
        ymid = np.median(unique_y)
    if eh == exp_h_list[-1]:
        print "All data read."
    # checks list lengths to ensure matching and then averages the velocities for all files
    # and then returns an array with position and average velocities
    if len(x_pos) == len(y_pos):
        pos_count = len(x_pos)
        if len(x_vel) == len(y_vel) and len(x_vel) == len(z_vel):
            vel_count = len(x_vel)
            # Integer division under Python 2: equals the number of files.
            nof = vel_count / pos_count  # equals number of files for each height
            ax_vel, ay_vel, az_vel = avg_data_each_h(nof, pos_count, x_vel, y_vel, z_vel)
            if make_sg:
                subgrid_array = sub_grid(ux, x_pos, y_pos, eh, ax_vel, ay_vel, az_vel)
                return subgrid_array
            else:
                # Every record for this call shares the same height eh.
                z_pos = [eh] * len(x_pos)
                return xmid, ymid, zip(x_pos, y_pos, z_pos, ax_vel, ay_vel, az_vel)
        else:
            print "Error: different number of velocities!"
    else:
        print "Error: not all x-positions have a corresponding y-position!"
def avg_data_each_h(nof, lof, x_vel, y_vel, z_vel):
    """
    Averages the velocity components over the files of one height.
    :param nof: Number of files for each height
    :param lof: Length of the data set in each file
    :param x_vel: concatenated x-velocities from all files
    :param y_vel: concatenated y-velocities
    :param z_vel: concatenated z-velocities
    :return: Three arrays: averaged x-/y-/z- velocities
    """
    sx_vel = []
    sy_vel = []
    sz_vel = []
    for i in range(lof):
        sx = 0
        sy = 0
        sa = 0
        for j in range(nof):
            # NOTE(review): for per-file arrays concatenated back to back the
            # stride between files is the per-file length lof, yet the index
            # uses j * nof -- correct only when nof == lof; verify.
            sx += x_vel[i + (j * nof)]
            sy += y_vel[i + (j * nof)]
            sa += z_vel[i + (j * nof)]
        sx_vel.append(sx)
        sy_vel.append(sy)
        sz_vel.append(sa)
    # checks lengths match and then averages them and returns the average velocities
    if len(sx_vel) == len(sy_vel) and len(sx_vel) == len(sz_vel):
        if len(sx_vel) == lof:
            ax_vel = np.array(sx_vel) / nof
            ay_vel = np.array(sy_vel) / nof
            az_vel = np.array(sz_vel) / nof
            return ax_vel, ay_vel, az_vel
        else:
            print "Error: summed velocity array is the wrong length!"
    else:
        print "Error: summed velocity data not matching!"
def sub_grid(unique_x, xpos, ypos, zpos, axvel, ayvel, azvel):
    """
    Creates an n*n subgrid average (n = module-level sgs) over the
    measurement grid.
    :param unique_x: Number of unique x positions (grid row length)
    :param xpos: Array of x_positions
    :param ypos: Array of y_positions
    :param zpos: the height of this slice (the caller passes the scalar eh,
                 not an array, so each subgrid's z averages back to eh)
    :param axvel: Array of the average x_velocity corresponding to the positions
    :param ayvel: Array of the average y_velocity corresponding to the positions
    :param azvel: Array of the average z_velocity corresponding to the positions
    :return: An array containing the average parameter value for each subgrid
    """
    n = sgs
    ssgh_array = []
    i = 0
    while i + n + ((n - 1) * unique_x) < len(xpos):
        # Makes sure that the n*n subgrid fits inside the remaining data.
        sxp = 0
        syp = 0
        szp = 0
        sxv = 0
        syv = 0
        szv = 0
        # Sum the n*n window anchored at i (k steps whole rows of unique_x).
        for j in range(n):
            for k in range(n):
                sxp += xpos[i + j + (k * unique_x)]
                syp += ypos[i + j + (k * unique_x)]
                szp += zpos
                sxv += axvel[i + j + (k * unique_x)]
                syv += ayvel[i + j + (k * unique_x)]
                szv += azvel[i + j + (k * unique_x)]
        ssgh_array.append([sxp, syp, szp, sxv, syv, szv])
        # Advance to the next window: n to the right, or wrap to the start of
        # the row n rows further down when the current row is exhausted.
        # NOTE(review): the in-row advance does not check i % unique_x, so a
        # window can straddle a row boundary when unique_x % n != 0 -- verify.
        if (i + n) < len(xpos):
            i += n
        else:
            pl = unique_x - (i % unique_x)
            i += pl + ((n - 1) * unique_x)
    # Divide each sum by n*n to obtain the subgrid averages.
    return np.array(ssgh_array) / sqr(n)
def dict_to_array(dict_array):
    """
    Flattens a dictionary of sequences into a single numpy array.
    :param dict_array: dict mapping keys to sequences of records
    :return: numpy array of all records, in key-iteration order
    """
    flattened = [record for key in dict_array for record in dict_array[key]]
    return np.array(flattened)
def plot_3d_vector(pa):
    """
    Plots 3D vector graph of every peo3-th measurement.
    :param pa: Array containing [x_position, y_pos, z_pos, x_velocity, y_vel, z_vel]
    :return: n/a (shows the figure)
    """
    # Changeable variables
    al = 0.01  # arrow length
    rgba = (0.3, 0.3, 0.3, 0.8)  # rgba for panels
    lw = 1.5  # changes thickness of arrow
    X, Y, Z, U, V, W = zip(*pa)
    # In-plane speed, used as the quiver colour value.
    A = np.sqrt(np.power(X, 2) + np.power(Y, 2))
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    q = ax.quiver(X[::peo3], Y[::peo3], Z[::peo3], U[::peo3], V[::peo3], W[::peo3], A,
                  length=al, lw=lw)
    # NOTE(review): this overwrites the colour mapping with random values --
    # confirm whether it is a placeholder left in by mistake.
    q.set_array(np.random.rand(10))
    plt.colorbar(q)
    ax.w_xaxis.set_pane_color(rgba)
    ax.w_yaxis.set_pane_color(rgba)
    ax.w_zaxis.set_pane_color(rgba)
    ax.set_zlabel("Height")
    ax.set_title(r"$\mu$-PIV vector plot, %s, %s" % (shark_species, sample_area))
    plt.show()
    return
def plot_2d_vector(dicti):
    """
    Produces one 2D quiver plot per height, annotated with the mean velocity.
    :param dicti: Dictionary; for each height there is a corresponding array of
                  [x_position, y_pos, z_pos, x_velocity, y_vel, z_vel]
    :return: n/a (shows one figure per height)
    """
    hcount = 0  # index into exp_h_list, advanced once per dictionary key
    for k in dicti:
        pa2d = []
        for i in range(len(dicti[k])):
            # Keep only records whose z matches the current height.
            if dicti[k][i][2] == exp_h_list[hcount]:
                pa2d.append(
                    [dicti[k][i][0], dicti[k][i][1], dicti[k][i][3],
                     dicti[k][i][4]])
        X, Y, U, V = zip(*pa2d)
        # In-plane position magnitude, used as the quiver colour value.
        A = np.sqrt(np.power(X, 2.0) + np.power(Y, 2.0))
        fig = plt.quiver(X[::peo2], Y[::peo2], U[::peo2], V[::peo2], A)
        plt.colorbar(fig)
        plt.title(
            r"$\mu$-PIV vector plot at height %.3f, %s, %s" % (exp_h_list[hcount], shark_species,
                                                               sample_area))
        plt.xlabel(
            r"Average velocity: (%.3f $\bar{x}$ + %.3f $\bar{y}$) $ms^{-1}$" % (
                np.mean(U), np.mean(V)))
        plt.show()
        hcount += 1
    return
def plane_plots(xpv, ypv, dicti):
    """
    Makes planar vector plots of the xz- and yz-planes plus per-height mean
    velocity profiles, and optionally writes the means to file (w2f flag).
    :param xpv: x-position defining the yz-plane
    :param ypv: y-position defining the xz-plane
    :param dicti: Dictionary; for each height there is a corresponding array of
                  [x_position, y_pos, z_pos, x_velocity, y_vel, z_vel]
    :return: n/a (shows the 2x2 figure)
    """
    xz = []
    yz = []
    # Collect the two planar slices across all heights.
    for k in dicti:
        for i in range(len(dicti[k])):
            if dicti[k][i][1] == ypv:
                xz.append([dicti[k][i][0], dicti[k][i][2], dicti[k][i][3], dicti[k][i][5]])
            if dicti[k][i][0] == xpv:
                yz.append([dicti[k][i][1], dicti[k][i][2], dicti[k][i][4], dicti[k][i][5]])
    xzx, xzz, xzxv, xzzv = zip(*xz)
    yzy, yzz, yzyv, yzzv = zip(*yz)
    # Per-height mean velocity and error bars for each plane.
    axv, exv = mean_vel(xzxv, xzz)
    ayv, eyv = mean_vel(yzyv, yzz)
    # Changes height values to distance from dermal denticles.
    dfdd = np.array(abs(exp_h_list) - np.amin(abs(exp_h_list)))
    xzz = np.array(abs(np.array(xzz)) - np.amin(abs(np.array(xzz))))
    yzz = np.array(abs(np.array(yzz)) - np.amin(abs(np.array(yzz))))
    f, axarr = plt.subplots(2, 2, sharey=True)
    fig = axarr[0, 0].quiver(xzx, xzz, xzxv, xzzv, yzyv)
    # Colour uses yzyv so that it matches the other plot
    axarr[0, 0].set_xlabel("x")
    axarr[0, 0].set_ylabel(r"Height from dd ($mm$)")
    # Velocities scaled by 100 to plot in units of 10^-2 m/s.
    axarr[0, 1].errorbar(100*axv, dfdd, xerr=100*exv, marker='o', color='g')
    axarr[0, 1].plot([0.0, 0.0], [np.amin(dfdd), np.amax(dfdd)], 'k--')
    axarr[0, 1].set_xlabel(r"x-velocity ($\times 10^{-2}ms^{-1}$)")
    axarr[1, 0].quiver(yzy, yzz, yzyv, yzzv, yzyv)
    axarr[1, 0].set_xlabel("y")
    axarr[1, 0].set_ylabel(r"Height from dd ($mm$)")
    axarr[1, 1].errorbar(100*ayv, dfdd, xerr=100*eyv, marker='o', color='b')
    axarr[1, 1].plot([0.0, 0.0], [np.amin(dfdd), np.amax(dfdd)], 'k--')
    axarr[1, 1].set_xlabel(r"y-velocity ($\times 10^{-2}ms^{-1}$)")
    # Shared colourbar for all four panels, clipped to +-0.1.
    cax, kw = mpl.colorbar.make_axes([ax for ax in axarr.flat])
    f.colorbar(fig, cax=cax, **kw)
    cbar = mpl.colorbar.ColorbarBase(cax, norm=mpl.colors.Normalize(vmin=-0.1, vmax=0.1))
    cbar.set_clim(-0.1, 0.1)
    f.suptitle(r"$\mu$-PIV for the %s (%s). Flow direction: %s" % (shark_species, sample_area,
                                                                   flow_direction))
    plt.show()
    if w2f:
        write_mean_to_file(dfdd, axv, ayv)
    return
def mean_vel(vel_array, z_array):
    """
    Averages a velocity component per experimental height.
    :param vel_array: velocity values
    :param z_array: height of each velocity value (matched against exp_h_list)
    :return: (mean per height, error per height) as numpy arrays; returns
             None when neither sem_bar nor sd_bar is set (callers guard this)
    """
    ava = []  # mean velocity per height
    eva = []  # spread (std) per height
    for i in range(len(exp_h_list)):
        va = []
        for j in range(len(z_array)):
            if z_array[j] == exp_h_list[i]:
                va.append(vel_array[j])
        ava.append(np.mean(va))
        eva.append(np.std(va))
    if sem_bar:
        # NOTE(review): the standard error of the mean is usually
        # std / sqrt(n_samples at that height); this divides every std by the
        # number of heights instead -- verify intent before relying on it.
        return np.array(ava), np.array(eva) / len(eva)
    elif sd_bar:
        return np.array(ava), np.array(eva)
def write_mean_to_file(height, x_vel, y_vel):
    """
    Writes the average velocities for each height to a tab-separated file
    (<path2dir>/mean_vels/<exp_dir>.txt); never overwrites an existing file.
    :param height: list of experimental heights
    :param x_vel: list of average x-velocities
    :param y_vel: list of average y_velocities
    :return: n/a
    """
    mean_dir = path2dir + "mean_vels/"
    mean_file = mean_dir + exp_dir + ".txt"
    if not os.path.exists(mean_dir):
        os.makedirs(mean_dir)
        print "Creating mean_vels directory. It is a subfolder of \n %s" %path2dir
    if os.path.exists(mean_file):
        # Existing results are preserved; nothing is written.
        print "\nN.B. Mean velocity file for this data already exists. File not written."
    else:
        f = open(mean_file, "w")
        for i in range(len(height)):
            if i == 0:
                f.write("height\tx-velocity\ty_velocity\n")
            # NOTE(review): i == len(height) is never true inside
            # range(len(height)), so this branch is dead and the final data
            # line also ends with "\n" -- confirm whether that matters.
            if i == len(height):
                f.write("%s\t%s\t%s" %(height[i], x_vel[i], y_vel[i]))
            else:
                f.write("%s\t%s\t%s\n" %(height[i], x_vel[i], y_vel[i]))
        f.close()
        print "\nAverage values written to file."
    return
if __name__ == '__main__':
    # --- experiment configuration -------------------------------------
    path2dir = "/home/callumkift/Documents/sharks_dtu/micro_piv/"
    exp_dir = "20150716_x10_empty"
    main_dir = path2dir + exp_dir + "/"
    # main dir where all the subdirs with the data are
    shark_species = "Empty chamber"
    sample_area = ""
    flow_direction = "-ve y."
    exp_h_list = read_hf()  # vertical heights of PIV
    lehl = len(exp_h_list)
    make_sg = False  # True -> makes subgrids
    sgs = 3  # subgrid side length used by sub_grid
    plot3D = False  # True -> plots 3D vector
    peo3 = 10  # plots every nth vector for the 3D plot
    plot2D = False  # True -> plots 2D vector plots for all heights
    peo2 = 2  # plots every nth vector for the 2D plot
    # At least one must be True
    sem_bar = True  # plots standard error on mean bars on 2d_mean_roi graph
    sd_bar = False  # plot standard deviation bars on 2d_mean_roi graph
    # If both true, sem will be plotted
    w2f = True  # If true, will write the mean velocities for each height to a new file
    # This file will be in a directory in path2dir and be called mean_vels. The files name
    # will match that of exp_dir.
    # Median x/y values for our data. Revalued below.
    midx = 0
    midy = 0
    # --- processing pipeline ------------------------------------------
    if sem_bar or sd_bar:
        if lehl != 0:
            sub_dirs = get_subdirectories(main_dir)
            # Store files of each subdir in a dictionary
            height_file_dict = {}
            for dir in sub_dirs:
                height_file_dict["height{0}".format(dir)] = get_files(dir)
            height_file_dict = collections.OrderedDict(
                sorted(height_file_dict.items()))  # sorts dictionary by subdir
            # Stores an array filled with position and velocities for each height
            if len(height_file_dict) == lehl:
                print "\nreading and manipulating data ..."
                h_pos_vel_dict = {}
                hcount = 0
                for k in height_file_dict:
                    # Zero-pad the key index so lexicographic sorting stays
                    # correct with more than 10 height measurements.
                    if hcount < 10:
                        fhc = "0" + str(hcount)
                    else:
                        fhc = str(hcount)
                    midx, midy, h_pos_vel_dict["height{0}".format(fhc)] = get_data(exp_h_list[hcount],height_file_dict[k])
                    hcount += 1
                h_pos_vel_dict = collections.OrderedDict(sorted(h_pos_vel_dict.items()))
                if plot3D:
                    pa = dict_to_array(h_pos_vel_dict)
                    plot_3d_vector(pa)
                # Planar (xz/yz) plots are always produced.
                plane_plots(midx, midy, h_pos_vel_dict)
                if plot2D:
                    plot_2d_vector(h_pos_vel_dict)
            else:
                print "\nError: height list does not match number of subdirectories containing files!"
                print "Subdirectories: %d, Height list: %d" %(len(sub_dirs), lehl)
        else:
            print "\nError: experimental height measurements not given!"
    else:
        print "\nError: please choose which error bars to plot"
|
987,853 | e7d86fb7f47f6d2449685d4ea5e019683d681d42 | #!/usr/bin/env python
'''
rotate a structure to satisfy the ssneb requirements:
cell[0] along the x axis and cell[1] on the xoy plane
'''
from ase.io import read,write
import sys
# Input structure (VASP POSCAR format) is given on the command line.
finput = sys.argv[1]
p1 = read(finput,format='vasp')
a = p1.get_cell()
print a[0]
# First rotation: align lattice vector a[0] with the x axis (cell rotates too).
p1.rotate(a[0],'x',center=(0,0,0),rotate_cell=True)
a = p1.get_cell()
# Project the (already rotated) second lattice vector onto the yz-plane and
# rotate it onto y, which brings cell[1] into the xy-plane.
b = a[1]
b[0] = 0.0
p1.rotate(b,'y',center=(0,0,0),rotate_cell=True)
# Write the rotated structure next to the input with a '_rotated' suffix.
write(finput+'_rotated',p1,format='vasp')
|
987,854 | 4593012c3cf2f35ef98a48e4f3c403856c22c487 | number = input('Please input an integer.')
# Try to cast the input to an int; input() always returns a string.
try:
    number = int(number)
# int() raises ValueError for non-numeric text.
except ValueError as e:
    # Fixed typo in the user-facing message: "ingeter" -> "integer".
    print("Your input is not an integer.")
    print(e)
# Otherwise (no exception), the cast succeeded.
else:
    print(str(number) + " is indeed an integer!")
|
987,855 | 6c60e22666652fbe9571cb13fb1082f22bd25a52 | import hou
import hou_rig.my_null
import master
class character_placer(master.master):
    """Builds a 'character placer' rig limb: a master control null plus a
    hook null, and exposes per-node transform spare parms on the rig subnet.

    NOTE(review): relies on helpers inherited from master.master
    (create_rig_node, node_rename, position_node) -- behavior inferred from
    their names; confirm against master.py.
    """
    def __init__(self, node, rig_subnet):
        print "character placer"
        self.node = node
        self.rig_subnet = rig_subnet
        # Subnet that will contain this limb's nodes.
        self.limb_subnet = self.create_rig_node(self.rig_subnet)
        self.node_rename(self.node, self.limb_subnet)
        self.position_node(self.node, self.limb_subnet)
        # Main control null (controltype 1, scaled up, orientation 2).
        null_ch = self.limb_subnet.createNode("null", "character_placer")
        null_ch.parm("controltype").set(1)
        null_ch.parm("geoscale").set(2)
        null_ch.parm("orientation").set(2)
        # Hook null wired under the control; displayed dark grey.
        hook = hou_rig.my_null.my_null(self.limb_subnet, "hook_chain_0")
        #hook = hou_rig.my_null.my_null(self.subnet, "character_placer_hook")
        hook.setInput(0, null_ch)
        hook.setColor(hou.Color((0.15, 0.15, 0.15)))
        hook.parm("geoscale").set(1)
        hook.parm("controltype").set(3)
        hook.parm("orientation").set(2)
        self.parameters_ui()
        self.connect_parms()
        self.limb_subnet.layoutChildren()
    def parameters_ui(self):
        """Add Translate/Rotate/Scale/Pivot float3 spare parms (plus a
        separator) for every child of the limb subnet, grouped in a
        'character_placer' folder inside the rig subnet's "Rig Parms"."""
        # opening the template group
        tg_parms = self.rig_subnet.parmTemplateGroup()
        ch_p_folder_parms = hou.FolderParmTemplate(self.node.name()+"character_placer", "character_placer")
        # creating the parms, one set per child node, named "<node>_t/_r/_s/_p"
        for i in self.limb_subnet.children():
            hou_parm_template = hou.FloatParmTemplate(i.name()+"_t", "Translate", 3, default_value=([0, 0, 0]),
                                                      min=0, max=10, min_is_strict=False, max_is_strict=False,
                                                      look=hou.parmLook.Regular,
                                                      naming_scheme=hou.parmNamingScheme.XYZW)
            ch_p_folder_parms.addParmTemplate(hou_parm_template)
            hou_parm_template = hou.FloatParmTemplate(i.name()+"_r", "Rotate", 3, default_value=([0, 0, 0]), min=0,
                                                      max=360, min_is_strict=False, max_is_strict=False,
                                                      look=hou.parmLook.Regular, naming_scheme=hou.parmNamingScheme.XYZW)
            ch_p_folder_parms.addParmTemplate(hou_parm_template)
            hou_parm_template = hou.FloatParmTemplate(i.name()+"_s", "Scale", 3, default_value=([1, 1, 1]), min=0,
                                                      max=10, min_is_strict=False, max_is_strict=False,
                                                      look=hou.parmLook.Regular, naming_scheme=hou.parmNamingScheme.XYZW)
            ch_p_folder_parms.addParmTemplate(hou_parm_template)
            hou_parm_template = hou.FloatParmTemplate(i.name()+"_p", "Pivot", 3, default_value=([0, 0, 0]), min=0,
                                                      max=10, min_is_strict=False, max_is_strict=False,
                                                      look=hou.parmLook.Regular, naming_scheme=hou.parmNamingScheme.XYZW)
            ch_p_folder_parms.addParmTemplate(hou_parm_template)
            hou_parm_template = hou.SeparatorParmTemplate(i.name()+"_sep")
            ch_p_folder_parms.addParmTemplate(hou_parm_template)
        #finding the folder to put the parms and closing the template
        folder_node = tg_parms.findFolder(["Rig Parms"])
        tg_parms.appendToFolder(folder_node, ch_p_folder_parms)
        self.rig_subnet.setParmTemplateGroup(tg_parms)
    def connect_parms(self):
        """Drive each child node's tx..pz channels with ch() expressions
        referencing the spare parms created by parameters_ui."""
        list_of_parms = ["t", "r", "s", "p"]
        list_of_vectors = ["x", "y", "z"]
        for i in self.limb_subnet.children():
            for j in list_of_parms:
                for z in list_of_vectors:
                    i.parm(j+z).setExpression('ch("'+i.relativePathTo(self.rig_subnet)+"/"+i.name()+"_"+j+z+'")')
987,856 | c353a1655c9bc0b15bbd6cc6d4d49d86c5147d6b | # coding: utf-8
# pylint: disable=W0611
"""Compatibility layer"""
# optional support for Pandas: if unavailable, define a dummy class
# Detect pandas at import time; fall back to a stub class so code that
# references DataFrame (e.g. isinstance checks) works without pandas.
try:
    from pandas import DataFrame
except ImportError:
    PANDAS_INSTALLED = False

    class DataFrame():  # pylint: disable=R0903
        """Stand-in used when pandas is not installed."""
else:
    PANDAS_INSTALLED = True
__all__ = []
|
987,857 | 6ad8c39d7934ebfea439dfc804791069c64a441a | city = "Miami"
event = "Concert"
print("Welcome to " + city + " and enjoy the " + event)
# This same print statement can be rewritten with %-formatting as seen below
print("Welcome to %s and enjoy the %s" %(city, event))
# We can use the same approach when it's just one variable involved
print("Welcome to %s" % city)
987,858 | b2aef432d62788489695f23d6f42f097e2384ee0 | # Pandas example
import pandas as pd
import numpy as np
# Construct a dataframe from a dictionary: keys become column names,
# list values become the column data.
data = {
    "col1": [1,2,3],
    "col2": [5,3,2]
}
dataframe = pd.DataFrame(data)
print("Dictionary frame: ")
print(dataframe)
# Using numpy arrays: each inner tuple becomes a row, columns are
# auto-numbered 0..n-1.
data2 = np.array([(1,2,3), (4,5,6)])
df2 = pd.DataFrame(data2)
print("Numpy array: ")
print(df2)
# Open .csv files
# NOTE(review): assumes this CSV exists in the user's home directory --
# will raise FileNotFoundError otherwise.
datafile = pd.read_csv("~/consumers-price-index-June-2021-quarter-index-numbers.csv")
print("CSV File head: ")
print(datafile.head())
987,859 | a2e116afacbf2e95bcbe18974310697b9c188852 | """
Tests for accessing methods through classes and instances
"""
import objc
from PyObjCTest.clinmeth import PyObjC_ClsInst1, PyObjC_ClsInst2
from PyObjCTools.TestSupport import TestCase
class TestClassMethods(TestCase):
    """Verify that Objective-C class methods are reachable through the
    class object but hidden from instances."""
    # Some very basic tests that check that getattr on instances doesn't
    # return a class method and that getattr on classes prefers classmethods
    # over instance methods (and v.v. for getattr on instances)
    def testViaClass(self):
        # Lookup through the class yields a class-method selector.
        m = PyObjC_ClsInst1.clsmeth
        self.assertIsInstance(m, objc.selector)
        self.assertTrue(m.isClassMethod)
        self.assertEqual(m(), 4)
    def testViaInstance(self):
        # Instances must NOT expose class methods at all.
        o = PyObjC_ClsInst1.alloc().init()
        with self.assertRaisesRegex(
            AttributeError, "'PyObjC_ClsInst1' object has no attribute 'clsmeth'"
        ):
            o.clsmeth
    def testClassAndInstanceViaClass(self):
        # When a class method and instance method share a name,
        # class-level lookup resolves to the class method.
        m = PyObjC_ClsInst1.both
        self.assertIsInstance(m, objc.selector)
        self.assertTrue(m.__metadata__()["classmethod"])
        self.assertEqual(m(), 3)
    def testClassAndInstanceViaInstance(self):
        # ...while instance-level lookup resolves to the instance method.
        o = PyObjC_ClsInst1.alloc().init()
        m = o.both
        self.assertTrue(isinstance(m, objc.selector))
        self.assertTrue(not m.isClassMethod)
        self.assertEqual(m(), 2)
class TestInstanceMethods(TestCase):
    """Verify access to instance methods via instances and via the class."""
    # Check that instance methods can be accessed through the instance, and
    # also through the class when no class method of the same name is
    # available.
    def testViaClass(self):
        # Unbound access through the class: calling without self fails.
        m = PyObjC_ClsInst1.instance
        self.assertTrue(isinstance(m, objc.selector))
        self.assertTrue(not m.isClassMethod)
        with self.assertRaisesRegex(TypeError, "Missing argument: self"):
            m()
    def testViaInstance(self):
        # Bound access through an instance works normally.
        o = PyObjC_ClsInst1.alloc().init()
        m = o.instance
        self.assertIsInstance(m, objc.selector)
        self.assertFalse(m.isClassMethod)
        self.assertEqual(m(), 1)
class TestSuper(TestCase):
    """Verify objc.super dispatches to the parent implementation for
    class methods, instance methods, and name-shared class/instance pairs."""
    # Tests that check if super() behaves as expected (which is the most likely
    # reason for failure).
    def testClassMethod(self):
        cls = PyObjC_ClsInst2
        # Subclass override vs. parent implementation via objc.super.
        self.assertEqual(cls.clsmeth(), 40)
        self.assertEqual(objc.super(cls, cls).clsmeth(), 4)
    def testInstanceMethod(self):
        o = PyObjC_ClsInst2.alloc().init()
        self.assertEqual(o.instance(), 10)
        self.assertEqual(objc.super(PyObjC_ClsInst2, o).instance(), 1)
    def testBoth(self):
        # "both" exists as instance method (20/2) and class method (30/3).
        o = PyObjC_ClsInst2.alloc().init()
        self.assertEqual(o.both(), 20)
        self.assertEqual(objc.super(PyObjC_ClsInst2, o).both(), 2)
        cls = PyObjC_ClsInst2
        self.assertEqual(cls.both(), 30)
        self.assertEqual(objc.super(cls, cls).both(), 3)
987,860 | 571a564fcf82aded808c1deac462331092f9f0e8 | '''
------------------------------------------------------------------------
Last updated 6/3/2015
Firm functions for taxes in SS and TPI.
------------------------------------------------------------------------
'''
# Packages
import numpy as np
import tax_funcs as tax
'''
------------------------------------------------------------------------
Functions
------------------------------------------------------------------------
'''
def get_r(Y_now, K_now, params):
    '''
    Interest rate from the firm's first-order condition for capital.

    Parameters: Y_now (aggregate output), K_now (aggregate capital),
        params (sequence of model parameters; only alpha and delta used)
    Returns: r_now = alpha * Y / K - delta (net return to capital)
    '''
    # params layout: (J, S, T, beta, sigma, alpha, Z, delta, ...).
    # Index directly instead of unpacking 23 names of which 21 were unused.
    alpha = params[5]
    delta = params[7]
    r_now = (alpha * Y_now / K_now) - delta
    return r_now
def get_Y(K_now, L_now, params):
    '''
    Cobb-Douglas aggregate production function.

    Parameters: K_now (aggregate capital), L_now (aggregate labor),
        params (sequence of model parameters; only alpha and Z used)
    Returns: Y_now = Z * K^alpha * L^(1-alpha)
    '''
    # params layout: (J, S, T, beta, sigma, alpha, Z, delta, ...).
    # Index directly instead of unpacking 23 names of which 21 were unused.
    alpha = params[5]
    Z = params[6]
    Y_now = Z * (K_now ** alpha) * ((L_now) ** (1 - alpha))
    return Y_now
def get_w(Y_now, L_now, params):
    '''
    Wage rate from the firm's first-order condition for labor.

    Parameters: Y_now (aggregate output), L_now (aggregate labor),
        params (sequence of model parameters; only alpha used)
    Returns: w_now = (1 - alpha) * Y / L
    '''
    # params layout: (J, S, T, beta, sigma, alpha, ...).
    # Index directly instead of unpacking 23 names of which 22 were unused.
    alpha = params[5]
    w_now = (1 - alpha) * Y_now / L_now
    return w_now
def get_L(e, n, weights):
    '''
    Aggregate effective labor supply.

    Parameters: e (ability profile), n (labor supply),
        weights (population weights); broadcastable arrays
    Returns: aggregate labor, the sum of e * weights * n
    '''
    weighted_labor = e * weights * n
    return np.sum(weighted_labor)
|
987,861 | 819c11fb2ff6e9bbda0cb03380c26525458095b7 | -X FMLP -Q 0 -L 3 120 400
-X FMLP -Q 0 -L 3 93 400
-X FMLP -Q 0 -L 3 80 400
-X FMLP -Q 1 -L 2 73 400
-X FMLP -Q 1 -L 2 63 250
-X FMLP -Q 2 -L 1 55 200
-X FMLP -Q 2 -L 1 45 400
-X FMLP -Q 3 -L 1 35 125
-X FMLP -Q 3 -L 1 35 150
22 100
21 100
|
987,862 | 31efbe592fc3314bb957983c19ae73bc0c41ce94 | from HighwayNetwork import HighwayNetwork
from random import randint, seed, random
########
#
# Initialize randomizer
#
########
import sys
#myseed = randint(0, sys.maxint)
# Fixed seed so runs are reproducible; printed for reference.
myseed = 4909137950491786826
print "Random seed:", myseed
seed(myseed)
########
#
# Load data
#
########
import scipy.io
# sensitivity_sample.mat provides true flows (x_true) and sampled paths.
data = scipy.io.loadmat('sensitivity_sample.mat')
flows = [x[0] for x in data['x_true']]
# Each route becomes a list of (x, y) coordinate tuples.
routes = [map(tuple, x[0]) for x in data['paths_sampled']]
########
#
# Initialize cell towers
#
########
numCellTowers = 80
cellPositions = []
# Bounding box of all route waypoints; towers are placed uniformly inside it.
x = [s[0] for r in routes for s in r]
y = [s[1] for r in routes for s in r]
maxx = max(x)
maxy = max(y)
minx = min(x)
miny = min(y)
for i in xrange(numCellTowers):
    cellPositions.append((minx + random() * (maxx - minx),
                          miny + random() * (maxy - miny)))
########
#
# Pick a route
#
########
# Restrict the experiment to a single route (index 9) with unit flow.
index = 9
flows = [1]
routes = routes[index:index+1]
########
#
# Plot stuff
#
########
'''
from scipy.spatial import Voronoi, voronoi_plot_2d
import matplotlib.pyplot as plt
vor = Voronoi(cellPositions)
voronoi_plot_2d(vor)
plt.plot(*zip(*routes[0]), marker='.')
for i, pt in enumerate(cellPositions):
    # Annotate the points 5 _points_ above and to the left of the vertex
    plt.annotate('{}'.format(i), xy=pt, xytext=(0, 0), ha='center', va='center',
                 textcoords='offset points')
plt.show()
'''
########
#
# Simulation
#
########
# Run simulation over a grid of (spread, inertia, balancing) parameters
# and dump the observed cell-path counts for each combination.
n = HighwayNetwork(cellPositions, flows, routes)
for f, rest, params in n.go(2000, 10, tlimit = 100, spread=[0, 1], inertia=[0, 1], balancing=[0, .1]):
    print "spread = %f, inertia = %f, balancing = %f" % params
    for cp, count in n.paths.iteritems():
        print count, ":", cp
    #print f, rest
    print
987,863 | 836caac438a8724782c5339b4583d59010115bdd | import RPi.GPIO as GPIO
class Lcd:
    """HD44780-compatible character LCD driven in 4-bit mode over GPIO.

    Fixes over the previous revision:
    * _function_set used ``self.*`` in its default arguments, which are
      evaluated when the class body runs -> NameError on import.
    * ``funtion_set_mask`` typo -> NameError at call time.
    * attribute typo ``numer_of_lines`` -> renamed ``number_of_lines``
      (referenced by _function_set).
    * several command methods passed *lists* to instruction(), which
      right-shifts an int -> TypeError; they now build integer commands.
    """

    def __init__(self, rs=2, e=3, db7=4, db6=14, db5=15, db4=18):
        """
        Initialize GPIO pin numbers (BCM numbering).
        RS selects register
            0: instruction register
            1: data register
        E starts data read/write
            Rising edge: read RS
            Falling edge: read data bits
        DB7 to DB4: data bits (4-bit interface)
        """
        self.rs = rs
        self.e = e
        self.db7 = db7
        self.db6 = db6
        self.db5 = db5
        self.db4 = db4
        GPIO.setmode(GPIO.BCM)
        GPIO.setup([self.rs, self.e, self.db7, self.db6, self.db5, self.db4], GPIO.OUT)
        # Display on/off control defaults
        self.display_on_off = 1
        self.cursor_on_off = 1
        self.cursor_blink = 1
        # Function set defaults
        self.data_length = 0       # 4 bit data length
        self.number_of_lines = 1   # two display lines
        self.character_font = 0    # 5x8 dots font

    def write_string(self, string):
        """Write a string one character at a time."""
        for char in string:
            self.write_char(char)

    def write_char(self, char=' '):
        """Write a single character to the data register (RS=1)."""
        self.instruction(ord(char), True)

    def return_home(self):
        """Return cursor to DDRAM address 0 (instruction 0b0000001x).

        NOTE(review): the old docstring claimed this clears the display;
        the 0x02 command only homes the cursor -- confirm intended use.
        """
        self.instruction(0b00000010)

    def entry_mode_set(self, id=True, s=False):
        """
        Sets cursor move direction and specifies display shift.
        These operations are performed during data write and read.
        I/D = 1: Increment (default)
        I/D = 0: Decrement
        S = 1: Accompanies display shift
        """
        self.instruction(0b00000100 | (bool(id) << 1) | bool(s))

    def display_on_off_control(self, d=True, c=True, b=True):
        """
        Sets entire display (D) on/off,
        cursor on/off (C), and
        blinking of cursor position character (B).
        """
        self.instruction(0b00001000 | (bool(d) << 2) | (bool(c) << 1) | bool(b))

    def cursor_or_display_shift(self, sc=True, rl=True):
        """
        Moves cursor and shifts display without changing DDRAM contents
        S/C = 1: Display shift (default)
        S/C = 0: Cursor move
        R/L = 1: Shift to the right (default)
        R/L = 0: Shift to the left
        """
        self.instruction(0b00010000 | (bool(sc) << 3) | (bool(rl) << 2))

    def _function_set(self, data_length=None, number_of_lines=None, character_font=None):
        """
        Sets interface data length (DL),
        number of display lines (N),
        and character font (F).
        DL = 1: 8 bits
        DL = 0: 4 bits
        N = 1: 2 lines (default)
        N = 0: 1 line
        F = 1: 5x10 dots
        F = 0: 5x8 dots (default)

        None arguments fall back to the instance defaults (defaults can't
        reference self in the signature -- that was a NameError).
        """
        if data_length is None:
            data_length = self.data_length
        if number_of_lines is None:
            number_of_lines = self.number_of_lines
        if character_font is None:
            character_font = self.character_font
        function_set_mask = 32  # 0b00100000
        data = function_set_mask | (data_length << 4) | (number_of_lines << 3) | (character_font << 2)
        self.instruction(data)

    def set_ddram_address(self, address=0, line=0):
        """
        Sets DDRAM address.
        DDRAM data is sent and received after this setting.
        address: 0 to 39 dec
        line: 0 or 1
        """
        # Command: 1 L AAAAAA  (DB7=1, DB6=line, DB5..DB0=address)
        self.instruction(0b10000000 | ((line & 1) << 6) | (address & 0x3F))

    def instruction(self, data, rs=False, data_length=0):
        """
        HD44780U instruction: clock out an 8-bit value on the 4 data pins,
        high nibble first (two enable pulses in 4-bit mode).
        RS = 0: Instruction register
        RS = 1: Data register
        """
        GPIO.output(self.rs, rs)
        # higher order bits
        GPIO.output(self.e, True)
        GPIO.output(self.db7, bool((data >> 7) & 1))
        GPIO.output(self.db6, bool((data >> 6) & 1))
        GPIO.output(self.db5, bool((data >> 5) & 1))
        GPIO.output(self.db4, bool((data >> 4) & 1))
        GPIO.output(self.e, False)
        if(data_length == 0):
            # 4 bit data mode: send the lower order bits in a second pulse
            GPIO.output(self.e, True)
            GPIO.output(self.db7, bool((data >> 3) & 1))
            GPIO.output(self.db6, bool((data >> 2) & 1))
            GPIO.output(self.db5, bool((data >> 1) & 1))
            GPIO.output(self.db4, bool(data & 1))
            GPIO.output(self.e, False)

    def cleanup(self):
        """Release all GPIO ports."""
        print('GPIO cleanup...')
        GPIO.cleanup()
|
987,864 | bb63813e2527f2f9666d467ad59b7ad982250e30 | # Dakota Bourne db2nb
"""
The purpose of this program is to take user inputs and depending on the input for the answer, either generate a new
number or use the number given by the user to play a guessing game.
"""
import random
answer = int(input("What should the answer be? "))
guesses = int(input("How many guesses? "))
# -1 is a sentinel meaning "pick a random answer for me".
if answer == -1:
    answer = random.randrange(1, 100)
# Track the most recent guess. Initializing it here fixes a NameError in
# the final check when the player requested zero guesses.
guess = None
while guesses != 0:
    guesses -= 1
    guess = float(input("guess a number: "))
    if guess < answer:
        print("The number is higher than that.")
    elif guess > answer:
        print("The number is lower than that.")
    else:
        print("You win!")
        # Zero out the counter so the loop (and the game) ends.
        guesses = 0
# Out of guesses without a win (None != answer also counts as a loss).
if guesses == 0 and guess != answer:
    print("You lose; the number was " + str(answer))
|
987,865 | 0f1012c9ebadb2342666c69d12273975149bd225 | '''
This module contains the class tasks
'''
from __future__ import print_function
import glob
import copy
import atexit
import logging
import os
import shutil
import sys
import re
import locale
import yaml
import TarSCM.scm
import TarSCM.archive
from TarSCM.helpers import Helpers
from TarSCM.changes import Changes
from TarSCM.exceptions import OptionsError
class Tasks():
    '''
    Class to create a task list for formats which can contain more then one scm
    job like snapcraft or appimage
    '''
    def __init__(self, args):
        """Store CLI args and prepare empty task/cleanup state."""
        self.task_list    = []
        self.cleanup_dirs = []
        self.helpers      = Helpers()
        self.changes      = Changes()
        self.scm_object   = None
        self.data_map     = None
        self.args         = args

    def cleanup(self):
        """Cleaning temporary directories."""
        if self.args.skip_cleanup:
            logging.debug("Skipping cleanup")
            return
        logging.debug("Cleaning: %s", ' '.join(self.cleanup_dirs))
        for dirname in self.cleanup_dirs:
            if not os.path.exists(dirname):
                continue
            ploc = locale.getpreferredencoding()
            shutil.rmtree(dirname.encode(ploc))
        self.cleanup_dirs = []
        # Unlock to prevent dead lock in cachedir if exception
        # gets raised
        if self.scm_object:
            self.scm_object.unlock_cache()
            # calls the corresponding cleanup routine
            # (fixed: previously called outside the guard, raising
            # AttributeError when no scm object had been created yet)
            self.scm_object.cleanup()

    def generate_list(self):
        '''
        Generate list of scm jobs from appimage.yml, snapcraft.yaml or a single
        job from cli arguments.
        '''
        args = self.args
        scms = ['git', 'tar', 'svn', 'bzr', 'hg']
        if args.appimage:
            # we read the SCM config from appimage.yml
            filehandle = open('appimage.yml')
            self.data_map = yaml.safe_load(filehandle)
            filehandle.close()
            args.use_obs_scm = True
            build_scms = ()
            try:
                build_scms = self.data_map['build'].keys()
            except (TypeError, KeyError):
                pass
            # run for each scm an own task
            for scm in scms:
                if scm not in build_scms:
                    continue
                for url in self.data_map['build'][scm]:
                    args.url = url
                    args.scm = scm
                    self.task_list.append(copy.copy(args))
        elif args.snapcraft:
            # we read the SCM config from snapcraft.yaml instead
            # getting it via parameters
            filehandle = open('snapcraft.yaml')
            self.data_map = yaml.safe_load(filehandle)
            filehandle.close()
            args.use_obs_scm = True
            # run for each part an own task
            for part in self.data_map['parts'].keys():
                args.filename = part
                if 'source-type' not in self.data_map['parts'][part].keys():
                    continue
                pep8_1 = self.data_map['parts'][part]['source-type']
                if pep8_1 not in scms:
                    continue
                # avoid conflicts with files
                args.clone_prefix = "_obs_"
                args.url = self.data_map['parts'][part]['source']
                self.data_map['parts'][part]['source'] = part
                args.scm = self.data_map['parts'][part]['source-type']
                del self.data_map['parts'][part]['source-type']
                self.task_list.append(copy.copy(args))
        else:
            self.task_list.append(args)

    def process_list(self):
        '''
        process tasks from the task_list
        '''
        for task in self.task_list:
            self.process_single_task(task)

    def finalize(self):
        '''
        final steps after processing task list
        '''
        args = self.args
        if args.snapcraft:
            # write the new snapcraft.yaml file
            # we prefix our own here to be sure to not overwrite user files,
            # if he is using us in "disabled" mode
            new_file = args.outdir + '/_service:snapcraft:snapcraft.yaml'
            with open(new_file, 'w') as outfile:
                outfile.write(yaml.dump(self.data_map,
                                        default_flow_style=False))
        # execute also download_files for downloading single sources
        if args.snapcraft or args.appimage:
            download_files = '/usr/lib/obs/service/download_files'
            if os.path.exists(download_files):
                cmd = [download_files, '--outdir', args.outdir]
                rcode, output = self.helpers.run_cmd(cmd, None)
                if rcode != 0:
                    raise RuntimeError("download_files has failed:%s" % output)

    def process_single_task(self, args):
        '''
        do the work for a single task
        '''
        self.args = args
        logging.basicConfig(format="%(message)s", stream=sys.stderr,
                            level=logging.INFO)
        # force cleaning of our workspace on exit
        atexit.register(self.cleanup)
        scm2class = {
            'git': 'Git',
            'bzr': 'Bzr',
            'hg':  'Hg',
            'svn': 'Svn',
            'tar': 'Tar',
        }
        # create objects for TarSCM.<scm> and TarSCM.helpers
        try:
            scm_class = getattr(TarSCM.scm, scm2class[args.scm])
        except KeyError:
            # narrowed from a bare "except": only an unknown --scm value
            # should be reported as an options error
            raise OptionsError("Please specify valid --scm=... options")
        # self.scm_object is need to unlock cache in cleanup
        # if exception occurs
        self.scm_object = scm_object = scm_class(args, self)
        tmode = bool(os.getenv('TAR_SCM_TESTMODE'))
        if not tmode and not scm_object.check_url():
            sys.exit("--url does not match remote repository")
        try:
            scm_object.check_scm()
        except OSError:
            print("Please install '%s'" % scm_object.scm)
            sys.exit(1)
        scm_object.fetch_upstream()
        if args.filename:
            dstname = basename = args.filename
        else:
            dstname = basename = os.path.basename(scm_object.clone_dir)
        version = self.get_version()
        changesversion = version
        # Append "-<version>" unless invoked as the plain tar/snapcraft/
        # appimage service.
        if version and not sys.argv[0].endswith("/tar") \
           and not sys.argv[0].endswith("/snapcraft") \
           and not sys.argv[0].endswith("/appimage"):
            if isinstance(dstname, bytes):
                version = version.encode('UTF-8')
            dstname += '-' + version
        logging.debug("DST: %s", dstname)
        detected_changes = scm_object.detect_changes()
        if not args.use_obs_gbp:
            scm_object.prep_tree_for_archive(args.subdir, args.outdir,
                                             dstname=dstname)
            self.cleanup_dirs.append(scm_object.arch_dir)
        # For the GBP service there is no copy in arch_dir, so use clone_dir
        # which has the same content
        extract_src = scm_object.arch_dir
        if args.use_obs_scm:
            arch = TarSCM.archive.ObsCpio()
        elif args.use_obs_gbp:
            arch = TarSCM.archive.Gbp()
            extract_src = scm_object.clone_dir
        else:
            arch = TarSCM.archive.Tar()
        arch.extract_from_archive(extract_src, args.extract,
                                  args.outdir)
        arch.create_archive(
            scm_object,
            basename = basename,
            dstname  = dstname,
            version  = version,
            cli      = args
        )
        if detected_changes:
            self._process_changes(args,
                                  version,
                                  changesversion,
                                  detected_changes)
        scm_object.finalize()

    def _process_changes(self, args, ver, changesversion, detected_changes):
        """Write detected SCM changes into every *.changes file in cwd and
        record the processed revision."""
        changesauthor = self.changes.get_changesauthor(args)
        logging.debug("AUTHOR: %s", changesauthor)
        if not ver:
            args.version = "_auto_"
            changesversion = self.get_version()
        logging.debug("Searching for '*.changes' in %s", os.getcwd())
        for filename in glob.glob('*.changes'):
            new_changes_file = os.path.join(args.outdir, filename)
            shutil.copy(filename, new_changes_file)
            self.changes.write_changes(new_changes_file,
                                       detected_changes['lines'],
                                       changesversion, changesauthor)
        self.changes.write_changes_revision(args.url, args.outdir,
                                            detected_changes['revision'])

    def get_version(self):
        '''
        Generate final version number by detecting version from scm if not
        given as cli option and applying versionrewrite_pattern and
        versionprefix if given as cli option
        '''
        version = self.args.version
        if version == '_none_':
            return ''
        if version == '_auto_' or self.args.versionformat:
            version = self.detect_version()
        if self.args.versionrewrite_pattern:
            regex = re.compile(self.args.versionrewrite_pattern)
            version = regex.sub(self.args.versionrewrite_replacement, version)
        if self.args.versionprefix:
            version = "%s.%s" % (self.args.versionprefix, version)
        logging.debug("VERSION(auto): %s", version)
        return version

    def detect_version(self):
        """Automatic detection of version number for checked-out repository."""
        version = self.scm_object.detect_version(self.args.__dict__).strip()
        logging.debug("VERSION(auto): %s", version)
        return version
|
987,866 | cd3a176eff2198c8f301172b063f85b314e34d6b | """Using inputs, concatenation to hype myself up with inspirational messages."""
# Student PID (assignment metadata).
__author__: str = "730401081"
# Your solution starts here...
# Read the user's name once, then reuse it in each encouragement line.
name: str = input("What is your name? ")
print(name + ", you have overcome 100% of the days you've faced!")
print("I am proud of you, " + name + ".")
print("Resting yourself, dear " + name + ", is investing in yourself.")
|
987,867 | 654223c16f0a202d01f8bb5d845d8dbab406a84b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# Scrapes the teachers' consultation-schedule page and prints its
# headings, paragraphs, and link lists as plain text.
url = 'http://www.magtu.ru/student/bakalavriat-spetsialitet-magistratura/raspisanie-konsultatsij-prepodavatelej.html'
import requests
rs = requests.get(url)
from bs4 import BeautifulSoup
root = BeautifulSoup(rs.content, 'html.parser')
from urllib.parse import urljoin
# Walk the direct children of the article body and render each by tag type.
for tag in root.select('[itemprop=articleBody] > *'):
    name = tag.name
    if name == 'h3':
        # Section heading.
        print(tag.text.upper())
        continue
    elif name == 'p':
        print(tag.text)
        continue
    elif name == 'ul':
        # Bullet list: resolve relative hrefs against the fetched URL.
        for li in tag.select('li'):
            if li.a:
                print('    "{}": {}'.format(li.a.text, urljoin(rs.url, li.a['href'])))
            else:
                print('    "{}"'.format(li.text))
        print()
        continue
|
987,868 | 19456565e86839abbe39c4c79857d2017c47feb2 | #!/usr/bin/python3
import common.db_helper as db
tablename='tb_assets_detail'
def insert_demo():
    """Print C# scaffolding for an INSERT into `tablename`: the SQL
    statement, a MySqlParameter list, and per-column value assignments.

    Control properties are chosen from the column comment:
    dates -> .Value, combo boxes -> .SelectedValue, else -> .Text.
    """
    cursor = db.get_cursor()
    cursor.execute("select column_name,data_type,column_comment from information_schema.COLUMNS where table_name = '%s'" % tablename)
    rows = cursor.fetchall()
    insert_sql = "insert into %s" % (tablename)
    columns = ""
    columns_values = ""
    type_list = []
    type_values_list = []
    for index, row in enumerate(rows):
        columnname = row[0]
        type = row[1]
        comment = row[2] or ""  # guard against NULL comments
        # No trailing comma after the last column.
        if index == len(rows) - 1:
            columns += columnname
            columns_values += "?%s" % (columnname)
        else:
            columns += row[0] + ","
            columns_values += "?%s," % (columnname)
        # Map MySQL column types to MySqlDbType names.
        if type == "varchar":
            type = "String"
        elif type == "float":
            type = "String"
        elif type == "datetime":
            type = "DateTime"
        elif type == "int":
            type = "String"
        # Fixed: str.find() returns 0 when the keyword starts the comment,
        # so "> 0" missed it; compare against -1 instead.
        if comment.find("日期") != -1:
            comment = "this.%s.Value" % (columnname)
        elif comment.find("下拉框") != -1:
            comment = "this.%s.SelectedValue" % (columnname)
        else:
            comment = "this.%s.Text" % (columnname)
        type_list.append("""new MySqlParameter("?%s",MySqlDbType.%s)""" % (columnname, type))
        type_values_list.append("""
            parass[kkk].Value = %s;
            kkk+=1;""" % (comment))
    insert_sql = "%s(%s) values(%s)" % (insert_sql, columns, columns_values)
    print(insert_sql)
    for t in type_list:
        print(t + ",")
    for t in type_values_list:
        print(t)
def load_demo():
    """Print C# statements that load each column of `tablename` from a
    data reader into the matching WinForms control.

    Skips the c99/c100 bookkeeping columns. Control property is chosen
    from the column comment (combo box / date / plain text).
    """
    cursor = db.get_cursor()
    cursor.execute("select column_name,data_type,column_comment from information_schema.COLUMNS where table_name = '%s'" % tablename)
    rows = cursor.fetchall()
    for index, row in enumerate(rows):
        columnname = row[0]
        if columnname == "c99" or columnname == "c100":
            continue
        type = row[1]
        comment = row[2]
        # Map MySQL column types to C#-side type names (currently unused
        # in the emitted code but kept for parity with the other demos).
        if type == "varchar":
            type = "String"
        elif type == "float":
            type = "Float"
        elif type == "date":
            type = "Date"
        elif type == "int":
            type = "Int32"
        text = ""
        if comment:
            # Fixed: str.find() returns 0 when the keyword starts the
            # comment, so "> 0" missed it; compare against -1 instead.
            if comment.find('下拉框') != -1:
                comment = 'SelectedValue'
                text = """this.%s.%s=reader["%s"]!=null ? reader["%s"].ToString() : null;""" % (columnname, comment, columnname, columnname)
            elif comment.find('日期') != -1:
                comment = 'Value'
                text = """this.%s.%s=reader["%s"]!=null ?Convert.ToDateTime( reader["%s"]) : DateTime.Now;""" % (columnname, comment, columnname, columnname)
            else:
                comment = "Text"
                text = """this.%s.%s=reader["%s"]!=null ? reader["%s"].ToString() : null;""" % (columnname, comment, columnname, columnname)
        print(text)
def update_demo():
    """Print C# scaffolding for an UPDATE of `tablename`: per-column
    parameter assignments and the "col=?col,..." SET-clause fragment.

    Skips the c99/c100 bookkeeping columns.
    """
    cursor = db.get_cursor()
    cursor.execute(
        "select column_name,data_type,column_comment from information_schema.COLUMNS where table_name = '%s'" % tablename)
    rows = cursor.fetchall()
    update_content_list = []
    fields = ""
    for index, row in enumerate(rows):
        columnname = row[0]
        if columnname == "c99" or columnname == "c100":
            continue
        type = row[1]
        comment = row[2]
        # Map MySQL column types to C#-side type names (kept for parity
        # with the other demos; not used in the emitted code).
        if type == "varchar":
            type = "String"
        elif type == "float":
            type = "Float"
        elif type == "date":
            type = "Date"
        elif type == "int":
            type = "Int32"
        # Fixed: str.find() returns 0 when the keyword starts the comment,
        # so "> 0" missed it; compare against -1 instead.
        if comment.find("日期") != -1:
            comment = "this.%s.Value" % (columnname)
        elif comment.find("下拉框") != -1:
            comment = "this.%s.SelectedValue" % (columnname)
        else:
            comment = "this.%s.Text" % (columnname)
        fields += "%s=?%s," % (columnname, columnname)
        text = ""
        para_value = """parass[i].Value=%s;""" % (comment)
        i_variable = "i+=1;"
        print(para_value)
        print(i_variable)
        # NOTE(review): text is never reassigned, so nothing is ever
        # appended here -- dead code kept for fidelity; confirm intent.
        if text:
            update_content_list.append(text)
    print(fields)
    for t in update_content_list:
        print(t)
# Run the INSERT-statement generator when executed as a script.
if __name__=="__main__":
    insert_demo()
987,869 | df7207c85a90ae874d91bdeccc4eb917d766dee1 | from cleverbot import Cleverbot
# Two independent Cleverbot sessions talking to each other forever.
bot1 = Cleverbot()
bot2 = Cleverbot()
# Seed the conversation in Japanese ("Hello").
text = 'こんにちは'
while True:
    # Re-decode each reply as UTF-8 -- presumably works around the API
    # returning latin-1-mangled bytes; TODO confirm against the library.
    text = bot1.ask(text).encode('ISO-8859-1').decode('utf-8')
    print('bot1 >> {}'.format(text))
    text = bot2.ask(text).encode('ISO-8859-1').decode('utf-8')
    print('bot2 >> {}'.format(text))
987,870 | 36ae837fabc9f9e80da38bb89e3bf97063e8ac8d | # Copyright (C) 2003-2013 Python Software Foundation
import copy
import operator
import pickle
import struct
import unittest
import plistlib
import os
import datetime
import codecs
import binascii
import collections
from test import support
from test.support import os_helper
from io import BytesIO
from plistlib import UID
ALL_FORMATS=(plistlib.FMT_XML, plistlib.FMT_BINARY)
# The testdata is generated using Mac/Tools/plistlib_generate_testdata.py
# (which using PyObjC to control the Cocoa classes for generating plists)
TESTDATA={
plistlib.FMT_XML: binascii.a2b_base64(b'''
PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPCFET0NU
WVBFIHBsaXN0IFBVQkxJQyAiLS8vQXBwbGUvL0RURCBQTElTVCAxLjAvL0VO
IiAiaHR0cDovL3d3dy5hcHBsZS5jb20vRFREcy9Qcm9wZXJ0eUxpc3QtMS4w
LmR0ZCI+CjxwbGlzdCB2ZXJzaW9uPSIxLjAiPgo8ZGljdD4KCTxrZXk+YUJp
Z0ludDwva2V5PgoJPGludGVnZXI+OTIyMzM3MjAzNjg1NDc3NTc2NDwvaW50
ZWdlcj4KCTxrZXk+YUJpZ0ludDI8L2tleT4KCTxpbnRlZ2VyPjkyMjMzNzIw
MzY4NTQ3NzU4NTI8L2ludGVnZXI+Cgk8a2V5PmFEYXRlPC9rZXk+Cgk8ZGF0
ZT4yMDA0LTEwLTI2VDEwOjMzOjMzWjwvZGF0ZT4KCTxrZXk+YURpY3Q8L2tl
eT4KCTxkaWN0PgoJCTxrZXk+YUZhbHNlVmFsdWU8L2tleT4KCQk8ZmFsc2Uv
PgoJCTxrZXk+YVRydWVWYWx1ZTwva2V5PgoJCTx0cnVlLz4KCQk8a2V5PmFV
bmljb2RlVmFsdWU8L2tleT4KCQk8c3RyaW5nPk3DpHNzaWcsIE1hw588L3N0
cmluZz4KCQk8a2V5PmFub3RoZXJTdHJpbmc8L2tleT4KCQk8c3RyaW5nPiZs
dDtoZWxsbyAmYW1wOyAnaGknIHRoZXJlISZndDs8L3N0cmluZz4KCQk8a2V5
PmRlZXBlckRpY3Q8L2tleT4KCQk8ZGljdD4KCQkJPGtleT5hPC9rZXk+CgkJ
CTxpbnRlZ2VyPjE3PC9pbnRlZ2VyPgoJCQk8a2V5PmI8L2tleT4KCQkJPHJl
YWw+MzIuNTwvcmVhbD4KCQkJPGtleT5jPC9rZXk+CgkJCTxhcnJheT4KCQkJ
CTxpbnRlZ2VyPjE8L2ludGVnZXI+CgkJCQk8aW50ZWdlcj4yPC9pbnRlZ2Vy
PgoJCQkJPHN0cmluZz50ZXh0PC9zdHJpbmc+CgkJCTwvYXJyYXk+CgkJPC9k
aWN0PgoJPC9kaWN0PgoJPGtleT5hRmxvYXQ8L2tleT4KCTxyZWFsPjAuNTwv
cmVhbD4KCTxrZXk+YUxpc3Q8L2tleT4KCTxhcnJheT4KCQk8c3RyaW5nPkE8
L3N0cmluZz4KCQk8c3RyaW5nPkI8L3N0cmluZz4KCQk8aW50ZWdlcj4xMjwv
aW50ZWdlcj4KCQk8cmVhbD4zMi41PC9yZWFsPgoJCTxhcnJheT4KCQkJPGlu
dGVnZXI+MTwvaW50ZWdlcj4KCQkJPGludGVnZXI+MjwvaW50ZWdlcj4KCQkJ
PGludGVnZXI+MzwvaW50ZWdlcj4KCQk8L2FycmF5PgoJPC9hcnJheT4KCTxr
ZXk+YU5lZ2F0aXZlQmlnSW50PC9rZXk+Cgk8aW50ZWdlcj4tODAwMDAwMDAw
MDA8L2ludGVnZXI+Cgk8a2V5PmFOZWdhdGl2ZUludDwva2V5PgoJPGludGVn
ZXI+LTU8L2ludGVnZXI+Cgk8a2V5PmFTdHJpbmc8L2tleT4KCTxzdHJpbmc+
RG9vZGFoPC9zdHJpbmc+Cgk8a2V5PmFuRW1wdHlEaWN0PC9rZXk+Cgk8ZGlj
dC8+Cgk8a2V5PmFuRW1wdHlMaXN0PC9rZXk+Cgk8YXJyYXkvPgoJPGtleT5h
bkludDwva2V5PgoJPGludGVnZXI+NzI4PC9pbnRlZ2VyPgoJPGtleT5uZXN0
ZWREYXRhPC9rZXk+Cgk8YXJyYXk+CgkJPGRhdGE+CgkJUEd4dmRITWdiMlln
WW1sdVlYSjVJR2QxYm1zK0FBRUNBenhzYjNSeklHOW1JR0pwYm1GeWVTQm5k
VzVyCgkJUGdBQkFnTThiRzkwY3lCdlppQmlhVzVoY25rZ1ozVnVhejRBQVFJ
RFBHeHZkSE1nYjJZZ1ltbHVZWEo1CgkJSUdkMWJtcytBQUVDQXp4c2IzUnpJ
RzltSUdKcGJtRnllU0JuZFc1clBnQUJBZ004Ykc5MGN5QnZaaUJpCgkJYVc1
aGNua2daM1Z1YXo0QUFRSURQR3h2ZEhNZ2IyWWdZbWx1WVhKNUlHZDFibXMr
QUFFQ0F6eHNiM1J6CgkJSUc5bUlHSnBibUZ5ZVNCbmRXNXJQZ0FCQWdNOGJH
OTBjeUJ2WmlCaWFXNWhjbmtnWjNWdWF6NEFBUUlECgkJUEd4dmRITWdiMlln
WW1sdVlYSjVJR2QxYm1zK0FBRUNBdz09CgkJPC9kYXRhPgoJPC9hcnJheT4K
CTxrZXk+c29tZURhdGE8L2tleT4KCTxkYXRhPgoJUEdKcGJtRnllU0JuZFc1
clBnPT0KCTwvZGF0YT4KCTxrZXk+c29tZU1vcmVEYXRhPC9rZXk+Cgk8ZGF0
YT4KCVBHeHZkSE1nYjJZZ1ltbHVZWEo1SUdkMWJtcytBQUVDQXp4c2IzUnpJ
RzltSUdKcGJtRnllU0JuZFc1clBnQUJBZ004CgliRzkwY3lCdlppQmlhVzVo
Y25rZ1ozVnVhejRBQVFJRFBHeHZkSE1nYjJZZ1ltbHVZWEo1SUdkMWJtcytB
QUVDQXp4cwoJYjNSeklHOW1JR0pwYm1GeWVTQm5kVzVyUGdBQkFnTThiRzkw
Y3lCdlppQmlhVzVoY25rZ1ozVnVhejRBQVFJRFBHeHYKCWRITWdiMllnWW1s
dVlYSjVJR2QxYm1zK0FBRUNBenhzYjNSeklHOW1JR0pwYm1GeWVTQm5kVzVy
UGdBQkFnTThiRzkwCgljeUJ2WmlCaWFXNWhjbmtnWjNWdWF6NEFBUUlEUEd4
dmRITWdiMllnWW1sdVlYSjVJR2QxYm1zK0FBRUNBdz09Cgk8L2RhdGE+Cgk8
a2V5PsOFYmVucmFhPC9rZXk+Cgk8c3RyaW5nPlRoYXQgd2FzIGEgdW5pY29k
ZSBrZXkuPC9zdHJpbmc+CjwvZGljdD4KPC9wbGlzdD4K'''),
plistlib.FMT_BINARY: binascii.a2b_base64(b'''
YnBsaXN0MDDfEBABAgMEBQYHCAkKCwwNDg8QERITFCgpLzAxMjM0NTc2OFdh
QmlnSW50WGFCaWdJbnQyVWFEYXRlVWFEaWN0VmFGbG9hdFVhTGlzdF8QD2FO
ZWdhdGl2ZUJpZ0ludFxhTmVnYXRpdmVJbnRXYVN0cmluZ1thbkVtcHR5RGlj
dFthbkVtcHR5TGlzdFVhbkludFpuZXN0ZWREYXRhWHNvbWVEYXRhXHNvbWVN
b3JlRGF0YWcAxQBiAGUAbgByAGEAYRN/////////1BQAAAAAAAAAAIAAAAAA
AAAsM0GcuX30AAAA1RUWFxgZGhscHR5bYUZhbHNlVmFsdWVaYVRydWVWYWx1
ZV1hVW5pY29kZVZhbHVlXWFub3RoZXJTdHJpbmdaZGVlcGVyRGljdAgJawBN
AOQAcwBzAGkAZwAsACAATQBhAN9fEBU8aGVsbG8gJiAnaGknIHRoZXJlIT7T
HyAhIiMkUWFRYlFjEBEjQEBAAAAAAACjJSYnEAEQAlR0ZXh0Iz/gAAAAAAAA
pSorLCMtUUFRQhAMoyUmLhADE////+1foOAAE//////////7VkRvb2RhaNCg
EQLYoTZPEPo8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmlu
YXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBv
ZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxs
b3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4A
AQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBn
dW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDTTxiaW5hcnkgZ3Vu
az5fEBdUaGF0IHdhcyBhIHVuaWNvZGUga2V5LgAIACsAMwA8AEIASABPAFUA
ZwB0AHwAiACUAJoApQCuALsAygDTAOQA7QD4AQQBDwEdASsBNgE3ATgBTwFn
AW4BcAFyAXQBdgF/AYMBhQGHAYwBlQGbAZ0BnwGhAaUBpwGwAbkBwAHBAcIB
xQHHAsQC0gAAAAAAAAIBAAAAAAAAADkAAAAAAAAAAAAAAAAAAALs'''),
'KEYED_ARCHIVE': binascii.a2b_base64(b'''
YnBsaXN0MDDUAQIDBAUGHB1YJHZlcnNpb25YJG9iamVjdHNZJGFyY2hpdmVy
VCR0b3ASAAGGoKMHCA9VJG51bGzTCQoLDA0OVnB5dHlwZVYkY2xhc3NZTlMu
c3RyaW5nEAGAAl8QE0tleUFyY2hpdmUgVUlEIFRlc3TTEBESExQZWiRjbGFz
c25hbWVYJGNsYXNzZXNbJGNsYXNzaGludHNfEBdPQ19CdWlsdGluUHl0aG9u
VW5pY29kZaQVFhcYXxAXT0NfQnVpbHRpblB5dGhvblVuaWNvZGVfEBBPQ19Q
eXRob25Vbmljb2RlWE5TU3RyaW5nWE5TT2JqZWN0ohobXxAPT0NfUHl0aG9u
U3RyaW5nWE5TU3RyaW5nXxAPTlNLZXllZEFyY2hpdmVy0R4fVHJvb3SAAQAI
ABEAGgAjAC0AMgA3ADsAQQBIAE8AVgBgAGIAZAB6AIEAjACVAKEAuwDAANoA
7QD2AP8BAgEUAR0BLwEyATcAAAAAAAACAQAAAAAAAAAgAAAAAAAAAAAAAAAA
AAABOQ=='''),
}
# An XML plist containing an internal entity declaration; plistlib must
# refuse to load it (entity expansion is a classic XML attack vector).
XML_PLIST_WITH_ENTITY=b'''\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd" [
<!ENTITY entity "replacement text">
]>
<plist version="1.0">
<dict>
<key>A</key>
<string>&entity;</string>
</dict>
</plist>
'''
INVALID_BINARY_PLISTS = [
('too short data',
b''
),
('too large offset_table_offset and offset_size = 1',
b'\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x2a'
),
('too large offset_table_offset and nonstandard offset_size',
b'\x00\x00\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x03\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x2c'
),
('integer overflow in offset_table_offset',
b'\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xff\xff\xff\xff\xff\xff\xff\xff'
),
('too large top_object',
b'\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x09'
),
('integer overflow in top_object',
b'\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\xff\xff\xff\xff\xff\xff\xff\xff'
b'\x00\x00\x00\x00\x00\x00\x00\x09'
),
('too large num_objects and offset_size = 1',
b'\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\xff'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x09'
),
('too large num_objects and nonstandard offset_size',
b'\x00\x00\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x03\x01'
b'\x00\x00\x00\x00\x00\x00\x00\xff'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x09'
),
('extremally large num_objects (32 bit)',
b'\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x7f\xff\xff\xff'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x09'
),
('extremally large num_objects (64 bit)',
b'\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\xff\xff\xff\xff\xff'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x09'
),
('integer overflow in num_objects',
b'\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\xff\xff\xff\xff\xff\xff\xff\xff'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x09'
),
('offset_size = 0',
b'\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x09'
),
('ref_size = 0',
b'\xa1\x01\x00\x08\x0a'
b'\x00\x00\x00\x00\x00\x00\x01\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x02'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x0b'
),
('too large offset',
b'\x00\x2a'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x09'
),
('integer overflow in offset',
b'\x00\xff\xff\xff\xff\xff\xff\xff\xff'
b'\x00\x00\x00\x00\x00\x00\x08\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x09'
),
('too large array size',
b'\xaf\x00\x01\xff\x00\x08\x0c'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x02'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x0d'
),
('extremally large array size (32-bit)',
b'\xaf\x02\x7f\xff\xff\xff\x01\x00\x08\x0f'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x02'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x10'
),
('extremally large array size (64-bit)',
b'\xaf\x03\x00\x00\x00\xff\xff\xff\xff\xff\x01\x00\x08\x13'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x02'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x14'
),
('integer overflow in array size',
b'\xaf\x03\xff\xff\xff\xff\xff\xff\xff\xff\x01\x00\x08\x13'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x02'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x14'
),
('too large reference index',
b'\xa1\x02\x00\x08\x0a'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x02'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x0b'
),
('integer overflow in reference index',
b'\xa1\xff\xff\xff\xff\xff\xff\xff\xff\x00\x08\x11'
b'\x00\x00\x00\x00\x00\x00\x01\x08'
b'\x00\x00\x00\x00\x00\x00\x00\x02'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x12'
),
('too large bytes size',
b'\x4f\x00\x23\x41\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x0c'
),
('extremally large bytes size (32-bit)',
b'\x4f\x02\x7f\xff\xff\xff\x41\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x0f'
),
('extremally large bytes size (64-bit)',
b'\x4f\x03\x00\x00\x00\xff\xff\xff\xff\xff\x41\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x13'
),
('integer overflow in bytes size',
b'\x4f\x03\xff\xff\xff\xff\xff\xff\xff\xff\x41\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x13'
),
('too large ASCII size',
b'\x5f\x00\x23\x41\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x0c'
),
('extremally large ASCII size (32-bit)',
b'\x5f\x02\x7f\xff\xff\xff\x41\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x0f'
),
('extremally large ASCII size (64-bit)',
b'\x5f\x03\x00\x00\x00\xff\xff\xff\xff\xff\x41\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x13'
),
('integer overflow in ASCII size',
b'\x5f\x03\xff\xff\xff\xff\xff\xff\xff\xff\x41\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x13'
),
('invalid ASCII',
b'\x51\xff\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x0a'
),
('too large UTF-16 size',
b'\x6f\x00\x13\x20\xac\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x0e'
),
('extremally large UTF-16 size (32-bit)',
b'\x6f\x02\x4f\xff\xff\xff\x20\xac\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x11'
),
('extremally large UTF-16 size (64-bit)',
b'\x6f\x03\x00\x00\x00\xff\xff\xff\xff\xff\x20\xac\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x15'
),
('integer overflow in UTF-16 size',
b'\x6f\x03\xff\xff\xff\xff\xff\xff\xff\xff\x20\xac\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x15'
),
('invalid UTF-16',
b'\x61\xd8\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x0b'
),
('non-hashable key',
b'\xd1\x01\x01\xa0\x08\x0b'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x02'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x0c'
),
('too large datetime (datetime overflow)',
b'\x33\x42\x50\x00\x00\x00\x00\x00\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x11'
),
('too large datetime (timedelta overflow)',
b'\x33\x42\xe0\x00\x00\x00\x00\x00\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x11'
),
('invalid datetime (Infinity)',
b'\x33\x7f\xf0\x00\x00\x00\x00\x00\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x11'
),
('invalid datetime (NaN)',
b'\x33\x7f\xf8\x00\x00\x00\x00\x00\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x11'
),
]
class TestPlistlib(unittest.TestCase):
    """Round-trip, conformance and validation tests for plistlib.

    Exercises both XML and binary formats against the reference plist built
    by _create() and the Apple-generated TESTDATA blobs.
    """
    def tearDown(self):
        # Remove the scratch file some tests write; ignore if absent.
        # NOTE(review): the bare except also hides non-OSError failures;
        # catching FileNotFoundError would be sufficient.
        try:
            os.unlink(os_helper.TESTFN)
        except:
            pass
    def _create(self, fmt=None):
        """Return the reference plist whose serialized forms live in TESTDATA."""
        pl = dict(
            aString="Doodah",
            aList=["A", "B", 12, 32.5, [1, 2, 3]],
            aFloat = 0.5,
            anInt = 728,
            aBigInt = 2 ** 63 - 44,
            aBigInt2 = 2 ** 63 + 44,
            aNegativeInt = -5,
            aNegativeBigInt = -80000000000,
            aDict=dict(
                anotherString="<hello & 'hi' there!>",
                aUnicodeValue='M\xe4ssig, Ma\xdf',
                aTrueValue=True,
                aFalseValue=False,
                deeperDict=dict(a=17, b=32.5, c=[1, 2, "text"]),
                ),
            someData = b"<binary gunk>",
            someMoreData = b"<lots of binary gunk>\0\1\2\3" * 10,
            nestedData = [b"<lots of binary gunk>\0\1\2\3" * 10],
            aDate = datetime.datetime(2004, 10, 26, 10, 33, 33),
            anEmptyDict = dict(),
            anEmptyList = list()
            )
        # Non-ASCII key ("Åbenraa") to exercise unicode dict keys.
        pl['\xc5benraa'] = "That was a unicode key."
        return pl
    def test_create(self):
        pl = self._create()
        self.assertEqual(pl["aString"], "Doodah")
        self.assertEqual(pl["aDict"]["aFalseValue"], False)
    def test_io(self):
        # Round-trip through a real file; dump/load require binary file
        # objects, not path strings.
        pl = self._create()
        with open(os_helper.TESTFN, 'wb') as fp:
            plistlib.dump(pl, fp)
        with open(os_helper.TESTFN, 'rb') as fp:
            pl2 = plistlib.load(fp)
        self.assertEqual(dict(pl), dict(pl2))
        self.assertRaises(AttributeError, plistlib.dump, pl, 'filename')
        self.assertRaises(AttributeError, plistlib.load, 'filename')
    def test_invalid_type(self):
        pl = [ object() ]
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                self.assertRaises(TypeError, plistlib.dumps, pl, fmt=fmt)
    def test_invalid_uid(self):
        # UIDs must be unsigned 64-bit integers.
        with self.assertRaises(TypeError):
            UID("not an int")
        with self.assertRaises(ValueError):
            UID(2 ** 64)
        with self.assertRaises(ValueError):
            UID(-19)
    def test_int(self):
        # Values straddling each encoded-integer width boundary.
        for pl in [0, 2**8-1, 2**8, 2**16-1, 2**16, 2**32-1, 2**32,
                   2**63-1, 2**64-1, 1, -2**63]:
            for fmt in ALL_FORMATS:
                with self.subTest(pl=pl, fmt=fmt):
                    data = plistlib.dumps(pl, fmt=fmt)
                    pl2 = plistlib.loads(data)
                    self.assertIsInstance(pl2, int)
                    self.assertEqual(pl, pl2)
                    data2 = plistlib.dumps(pl2, fmt=fmt)
                    self.assertEqual(data, data2)
        # Integers outside the representable range must be rejected.
        for fmt in ALL_FORMATS:
            for pl in (2 ** 64 + 1, 2 ** 127-1, -2**64, -2 ** 127):
                with self.subTest(pl=pl, fmt=fmt):
                    self.assertRaises(OverflowError, plistlib.dumps,
                                      pl, fmt=fmt)
    def test_bytearray(self):
        # bytearray input is accepted but always loads back as bytes.
        for pl in (b'<binary gunk>', b"<lots of binary gunk>\0\1\2\3" * 10):
            for fmt in ALL_FORMATS:
                with self.subTest(pl=pl, fmt=fmt):
                    data = plistlib.dumps(bytearray(pl), fmt=fmt)
                    pl2 = plistlib.loads(data)
                    self.assertIsInstance(pl2, bytes)
                    self.assertEqual(pl2, pl)
                    data2 = plistlib.dumps(pl2, fmt=fmt)
                    self.assertEqual(data, data2)
    def test_bytes(self):
        pl = self._create()
        data = plistlib.dumps(pl)
        pl2 = plistlib.loads(data)
        self.assertEqual(dict(pl), dict(pl2))
        data2 = plistlib.dumps(pl2)
        self.assertEqual(data, data2)
    def test_indentation_array(self):
        data = [[[[[[[[{'test': b'aaaaaa'}]]]]]]]]
        self.assertEqual(plistlib.loads(plistlib.dumps(data)), data)
    def test_indentation_dict(self):
        data = {'1': {'2': {'3': {'4': {'5': {'6': {'7': {'8': {'9': b'aaaaaa'}}}}}}}}}
        self.assertEqual(plistlib.loads(plistlib.dumps(data)), data)
    def test_indentation_dict_mix(self):
        data = {'1': {'2': [{'3': [[[[[{'test': b'aaaaaa'}]]]]]}]}}
        self.assertEqual(plistlib.loads(plistlib.dumps(data)), data)
    def test_uid(self):
        # UIDs are binary-format-only; cover each encoded width.
        data = UID(1)
        self.assertEqual(plistlib.loads(plistlib.dumps(data, fmt=plistlib.FMT_BINARY)), data)
        dict_data = {
            'uid0': UID(0),
            'uid2': UID(2),
            'uid8': UID(2 ** 8),
            'uid16': UID(2 ** 16),
            'uid32': UID(2 ** 32),
            'uid63': UID(2 ** 63)
        }
        self.assertEqual(plistlib.loads(plistlib.dumps(dict_data, fmt=plistlib.FMT_BINARY)), dict_data)
    def test_uid_data(self):
        uid = UID(1)
        self.assertEqual(uid.data, 1)
    def test_uid_eq(self):
        self.assertEqual(UID(1), UID(1))
        self.assertNotEqual(UID(1), UID(2))
        self.assertNotEqual(UID(1), "not uid")
    def test_uid_hash(self):
        self.assertEqual(hash(UID(1)), hash(UID(1)))
    def test_uid_repr(self):
        self.assertEqual(repr(UID(1)), "UID(1)")
    def test_uid_index(self):
        self.assertEqual(operator.index(UID(1)), 1)
    def test_uid_pickle(self):
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.assertEqual(pickle.loads(pickle.dumps(UID(19), protocol=proto)), UID(19))
    def test_uid_copy(self):
        self.assertEqual(copy.copy(UID(1)), UID(1))
        self.assertEqual(copy.deepcopy(UID(1)), UID(1))
    def test_appleformatting(self):
        # Re-serializing Apple's reference output must be byte-identical.
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                pl = plistlib.loads(TESTDATA[fmt])
                data = plistlib.dumps(pl, fmt=fmt)
                self.assertEqual(data, TESTDATA[fmt],
                                 "generated data was not identical to Apple's output")
    def test_appleformattingfromliteral(self):
        self.maxDiff = None
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                pl = self._create(fmt=fmt)
                pl2 = plistlib.loads(TESTDATA[fmt], fmt=fmt)
                self.assertEqual(dict(pl), dict(pl2),
                                 "generated data was not identical to Apple's output")
                pl2 = plistlib.loads(TESTDATA[fmt])
                self.assertEqual(dict(pl), dict(pl2),
                                 "generated data was not identical to Apple's output")
    def test_bytesio(self):
        # Same as test_io but through in-memory streams.
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                b = BytesIO()
                pl = self._create(fmt=fmt)
                plistlib.dump(pl, b, fmt=fmt)
                pl2 = plistlib.load(BytesIO(b.getvalue()), fmt=fmt)
                self.assertEqual(dict(pl), dict(pl2))
                pl2 = plistlib.load(BytesIO(b.getvalue()))
                self.assertEqual(dict(pl), dict(pl2))
    def test_keysort_bytesio(self):
        pl = collections.OrderedDict()
        pl['b'] = 1
        pl['a'] = 2
        pl['c'] = 3
        for fmt in ALL_FORMATS:
            for sort_keys in (False, True):
                with self.subTest(fmt=fmt, sort_keys=sort_keys):
                    b = BytesIO()
                    plistlib.dump(pl, b, fmt=fmt, sort_keys=sort_keys)
                    pl2 = plistlib.load(BytesIO(b.getvalue()),
                                        dict_type=collections.OrderedDict)
                    self.assertEqual(dict(pl), dict(pl2))
                    if sort_keys:
                        self.assertEqual(list(pl2.keys()), ['a', 'b', 'c'])
                    else:
                        self.assertEqual(list(pl2.keys()), ['b', 'a', 'c'])
    def test_keysort(self):
        pl = collections.OrderedDict()
        pl['b'] = 1
        pl['a'] = 2
        pl['c'] = 3
        for fmt in ALL_FORMATS:
            for sort_keys in (False, True):
                with self.subTest(fmt=fmt, sort_keys=sort_keys):
                    data = plistlib.dumps(pl, fmt=fmt, sort_keys=sort_keys)
                    pl2 = plistlib.loads(data, dict_type=collections.OrderedDict)
                    self.assertEqual(dict(pl), dict(pl2))
                    if sort_keys:
                        self.assertEqual(list(pl2.keys()), ['a', 'b', 'c'])
                    else:
                        self.assertEqual(list(pl2.keys()), ['b', 'a', 'c'])
    def test_keys_no_string(self):
        # Plist dict keys must be strings.
        pl = { 42: 'aNumber' }
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                self.assertRaises(TypeError, plistlib.dumps, pl, fmt=fmt)
                b = BytesIO()
                self.assertRaises(TypeError, plistlib.dump, pl, b, fmt=fmt)
    def test_skipkeys(self):
        # skipkeys=True silently drops non-string keys instead of raising.
        pl = {
            42: 'aNumber',
            'snake': 'aWord',
        }
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                data = plistlib.dumps(
                    pl, fmt=fmt, skipkeys=True, sort_keys=False)
                pl2 = plistlib.loads(data)
                self.assertEqual(pl2, {'snake': 'aWord'})
                fp = BytesIO()
                plistlib.dump(
                    pl, fp, fmt=fmt, skipkeys=True, sort_keys=False)
                data = fp.getvalue()
                pl2 = plistlib.loads(fp.getvalue())
                self.assertEqual(pl2, {'snake': 'aWord'})
    def test_tuple_members(self):
        # Tuples serialize as arrays and come back as lists.
        pl = {
            'first': (1, 2),
            'second': (1, 2),
            'third': (3, 4),
        }
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                data = plistlib.dumps(pl, fmt=fmt)
                pl2 = plistlib.loads(data)
                self.assertEqual(pl2, {
                    'first': [1, 2],
                    'second': [1, 2],
                    'third': [3, 4],
                })
                # Binary format deduplicates equal objects, XML does not.
                if fmt != plistlib.FMT_BINARY:
                    self.assertIsNot(pl2['first'], pl2['second'])
    def test_list_members(self):
        pl = {
            'first': [1, 2],
            'second': [1, 2],
            'third': [3, 4],
        }
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                data = plistlib.dumps(pl, fmt=fmt)
                pl2 = plistlib.loads(data)
                self.assertEqual(pl2, {
                    'first': [1, 2],
                    'second': [1, 2],
                    'third': [3, 4],
                })
                self.assertIsNot(pl2['first'], pl2['second'])
    def test_dict_members(self):
        pl = {
            'first': {'a': 1},
            'second': {'a': 1},
            'third': {'b': 2 },
        }
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                data = plistlib.dumps(pl, fmt=fmt)
                pl2 = plistlib.loads(data)
                self.assertEqual(pl2, {
                    'first': {'a': 1},
                    'second': {'a': 1},
                    'third': {'b': 2 },
                })
                self.assertIsNot(pl2['first'], pl2['second'])
    def test_controlcharacters(self):
        for i in range(128):
            c = chr(i)
            testString = "string containing %s" % c
            if i >= 32 or c in "\r\n\t":
                # \r, \n and \t are the only legal control chars in XML
                data = plistlib.dumps(testString, fmt=plistlib.FMT_XML)
                # \r is normalized to \n by the XML parser, so skip it.
                if c != "\r":
                    self.assertEqual(plistlib.loads(data), testString)
            else:
                with self.assertRaises(ValueError):
                    plistlib.dumps(testString, fmt=plistlib.FMT_XML)
                # The binary format has no such restriction.
                plistlib.dumps(testString, fmt=plistlib.FMT_BINARY)
    def test_non_bmp_characters(self):
        pl = {'python': '\U0001f40d'}
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                data = plistlib.dumps(pl, fmt=fmt)
                self.assertEqual(plistlib.loads(data), pl)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_lone_surrogates(self):
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                with self.assertRaises(UnicodeEncodeError):
                    plistlib.dumps('\ud8ff', fmt=fmt)
                with self.assertRaises(UnicodeEncodeError):
                    plistlib.dumps('\udcff', fmt=fmt)
    def test_nondictroot(self):
        # Root object does not have to be a dict.
        for fmt in ALL_FORMATS:
            with self.subTest(fmt=fmt):
                test1 = "abc"
                test2 = [1, 2, 3, "abc"]
                result1 = plistlib.loads(plistlib.dumps(test1, fmt=fmt))
                result2 = plistlib.loads(plistlib.dumps(test2, fmt=fmt))
                self.assertEqual(test1, result1)
                self.assertEqual(test2, result2)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_invalidarray(self):
        for i in ["<key>key inside an array</key>",
                  "<key>key inside an array2</key><real>3</real>",
                  "<true/><key>key inside an array3</key>"]:
            self.assertRaises(ValueError, plistlib.loads,
                              ("<plist><array>%s</array></plist>"%i).encode())
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_invaliddict(self):
        for i in ["<key><true/>k</key><string>compound key</string>",
                  "<key>single key</key>",
                  "<string>missing key</string>",
                  "<key>k1</key><string>v1</string><real>5.3</real>"
                  "<key>k1</key><key>k2</key><string>double key</string>"]:
            self.assertRaises(ValueError, plistlib.loads,
                              ("<plist><dict>%s</dict></plist>"%i).encode())
            self.assertRaises(ValueError, plistlib.loads,
                              ("<plist><array><dict>%s</dict></array></plist>"%i).encode())
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_invalidinteger(self):
        self.assertRaises(ValueError, plistlib.loads,
                          b"<plist><integer>not integer</integer></plist>")
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_invalidreal(self):
        # NOTE(review): upstream CPython uses <real>not real</real> here; with
        # the <integer> tag this duplicates test_invalidinteger — confirm.
        self.assertRaises(ValueError, plistlib.loads,
                          b"<plist><integer>not real</integer></plist>")
    def test_integer_notations(self):
        # Decimal, hexadecimal and leading-zero forms are all accepted.
        pl = b"<plist><integer>456</integer></plist>"
        value = plistlib.loads(pl)
        self.assertEqual(value, 456)
        pl = b"<plist><integer>0xa</integer></plist>"
        value = plistlib.loads(pl)
        self.assertEqual(value, 10)
        pl = b"<plist><integer>0123</integer></plist>"
        value = plistlib.loads(pl)
        self.assertEqual(value, 123)
    def test_xml_encodings(self):
        # The XML declaration's encoding plus BOM must both be honored.
        base = TESTDATA[plistlib.FMT_XML]
        for xml_encoding, encoding, bom in [
                    (b'utf-8', 'utf-8', codecs.BOM_UTF8),
                    (b'utf-16', 'utf-16-le', codecs.BOM_UTF16_LE),
                    (b'utf-16', 'utf-16-be', codecs.BOM_UTF16_BE),
                    # Expat does not support UTF-32
                    #(b'utf-32', 'utf-32-le', codecs.BOM_UTF32_LE),
                    #(b'utf-32', 'utf-32-be', codecs.BOM_UTF32_BE),
                ]:
            pl = self._create(fmt=plistlib.FMT_XML)
            with self.subTest(encoding=encoding):
                data = base.replace(b'UTF-8', xml_encoding)
                data = bom + data.decode('utf-8').encode(encoding)
                pl2 = plistlib.loads(data)
                self.assertEqual(dict(pl), dict(pl2))
    def test_dump_invalid_format(self):
        with self.assertRaises(ValueError):
            plistlib.dumps({}, fmt="blah")
    def test_load_invalid_file(self):
        with self.assertRaises(plistlib.InvalidFileException):
            plistlib.loads(b"these are not plist file contents")
    def test_modified_uid_negative(self):
        neg_uid = UID(1)
        neg_uid.data = -1  # dodge the negative check in the constructor
        with self.assertRaises(ValueError):
            plistlib.dumps(neg_uid, fmt=plistlib.FMT_BINARY)
    def test_modified_uid_huge(self):
        huge_uid = UID(1)
        huge_uid.data = 2 ** 64  # dodge the size check in the constructor
        with self.assertRaises(OverflowError):
            plistlib.dumps(huge_uid, fmt=plistlib.FMT_BINARY)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_xml_plist_with_entity_decl(self):
        with self.assertRaisesRegex(plistlib.InvalidFileException,
                                    "XML entity declarations are not supported"):
            plistlib.loads(XML_PLIST_WITH_ENTITY, fmt=plistlib.FMT_XML)
class TestBinaryPlistlib(unittest.TestCase):
    """Binary-format-specific tests: wire-level decoding, dedup, cycles."""
    @staticmethod
    def decode(*objects, offset_size=1, ref_size=1):
        """Assemble a minimal bplist00 container around *objects* and load it.

        *objects* are pre-encoded object records; the offset table and the
        32-byte trailer are synthesized around them.
        """
        data = [b'bplist00']
        offset = 8
        offsets = []
        for x in objects:
            offsets.append(offset.to_bytes(offset_size, 'big'))
            data.append(x)
            offset += len(x)
        tail = struct.pack('>6xBBQQQ', offset_size, ref_size,
                           len(objects), 0, offset)
        data.extend(offsets)
        data.append(tail)
        return plistlib.loads(b''.join(data), fmt=plistlib.FMT_BINARY)
    def test_nonstandard_refs_size(self):
        # Issue #21538: Refs and offsets are 24-bit integers
        data = (b'bplist00'
                b'\xd1\x00\x00\x01\x00\x00\x02QaQb'
                b'\x00\x00\x08\x00\x00\x0f\x00\x00\x11'
                b'\x00\x00\x00\x00\x00\x00'
                b'\x03\x03'
                b'\x00\x00\x00\x00\x00\x00\x00\x03'
                b'\x00\x00\x00\x00\x00\x00\x00\x00'
                b'\x00\x00\x00\x00\x00\x00\x00\x13')
        self.assertEqual(plistlib.loads(data), {'a': 'b'})
    def test_dump_duplicates(self):
        # Test effectiveness of saving duplicated objects
        for x in (None, False, True, 12345, 123.45, 'abcde', 'абвгд', b'abcde',
                  datetime.datetime(2004, 10, 26, 10, 33, 33),
                  bytearray(b'abcde'), [12, 345], (12, 345), {'12': 345}):
            with self.subTest(x=x):
                data = plistlib.dumps([x]*1000, fmt=plistlib.FMT_BINARY)
                # 1000 copies must share a single record, not 1000.
                self.assertLess(len(data), 1100, repr(data))
    def test_identity(self):
        # Duplicated objects must load back as the *same* object.
        for x in (None, False, True, 12345, 123.45, 'abcde', b'abcde',
                  datetime.datetime(2004, 10, 26, 10, 33, 33),
                  bytearray(b'abcde'), [12, 345], (12, 345), {'12': 345}):
            with self.subTest(x=x):
                data = plistlib.dumps([x]*2, fmt=plistlib.FMT_BINARY)
                a, b = plistlib.loads(data)
                if isinstance(x, tuple):
                    x = list(x)
                self.assertEqual(a, x)
                self.assertEqual(b, x)
                self.assertIs(a, b)
    def test_cycles(self):
        # Self-referential containers must survive a binary round trip.
        # recursive list
        a = []
        a.append(a)
        b = plistlib.loads(plistlib.dumps(a, fmt=plistlib.FMT_BINARY))
        self.assertIs(b[0], b)
        # recursive tuple
        a = ([],)
        a[0].append(a)
        b = plistlib.loads(plistlib.dumps(a, fmt=plistlib.FMT_BINARY))
        self.assertIs(b[0][0], b)
        # recursive dict
        a = {}
        a['x'] = a
        b = plistlib.loads(plistlib.dumps(a, fmt=plistlib.FMT_BINARY))
        self.assertIs(b['x'], b)
    def test_deep_nesting(self):
        # N nested one-element arrays: either parse fully or fail cleanly
        # with RecursionError, never crash.
        for N in [300, 100000]:
            chunks = [b'\xa1' + (i + 1).to_bytes(4, 'big') for i in range(N)]
            try:
                result = self.decode(*chunks, b'\x54seed', offset_size=4, ref_size=4)
            except RecursionError:
                pass
            else:
                for i in range(N):
                    self.assertIsInstance(result, list)
                    self.assertEqual(len(result), 1)
                    result = result[0]
                self.assertEqual(result, 'seed')
    def test_large_timestamp(self):
        # Issue #26709: 32-bit timestamp out of range
        for ts in -2**31-1, 2**31:
            with self.subTest(ts=ts):
                d = (datetime.datetime.utcfromtimestamp(0) +
                     datetime.timedelta(seconds=ts))
                data = plistlib.dumps(d, fmt=plistlib.FMT_BINARY)
                self.assertEqual(plistlib.loads(data), d)
    def test_load_singletons(self):
        self.assertIs(self.decode(b'\x00'), None)
        self.assertIs(self.decode(b'\x08'), False)
        self.assertIs(self.decode(b'\x09'), True)
        self.assertEqual(self.decode(b'\x0f'), b'')
    def test_load_int(self):
        self.assertEqual(self.decode(b'\x10\x00'), 0)
        self.assertEqual(self.decode(b'\x10\xfe'), 0xfe)
        self.assertEqual(self.decode(b'\x11\xfe\xdc'), 0xfedc)
        self.assertEqual(self.decode(b'\x12\xfe\xdc\xba\x98'), 0xfedcba98)
        # 8-byte integers are signed.
        self.assertEqual(self.decode(b'\x13\x01\x23\x45\x67\x89\xab\xcd\xef'),
                         0x0123456789abcdef)
        self.assertEqual(self.decode(b'\x13\xfe\xdc\xba\x98\x76\x54\x32\x10'),
                         -0x123456789abcdf0)
    def test_unsupported(self):
        # Every token byte not assigned a meaning must be rejected.
        unsupported = [*range(1, 8), *range(10, 15),
                       0x20, 0x21, *range(0x24, 0x33), *range(0x34, 0x40)]
        for i in [0x70, 0x90, 0xb0, 0xc0, 0xe0, 0xf0]:
            unsupported.extend(i + j for j in range(16))
        for token in unsupported:
            with self.subTest(f'token {token:02x}'):
                with self.assertRaises(plistlib.InvalidFileException):
                    self.decode(bytes([token]) + b'\x00'*16)
    def test_invalid_binary(self):
        for name, data in INVALID_BINARY_PLISTS:
            with self.subTest(name):
                with self.assertRaises(plistlib.InvalidFileException):
                    plistlib.loads(b'bplist00' + data, fmt=plistlib.FMT_BINARY)
class TestKeyedArchive(unittest.TestCase):
    """Loading an NSKeyedArchiver plist must surface UID objects intact."""
    def test_keyed_archive_data(self):
        # This is the structure of a NSKeyedArchive packed plist
        data = {
            '$version': 100000,
            '$objects': [
                '$null', {
                    'pytype': 1,
                    '$class': UID(2),
                    'NS.string': 'KeyArchive UID Test'
                },
                {
                    '$classname': 'OC_BuiltinPythonUnicode',
                    '$classes': [
                        'OC_BuiltinPythonUnicode',
                        'OC_PythonUnicode',
                        'NSString',
                        'NSObject'
                    ],
                    '$classhints': [
                        'OC_PythonString', 'NSString'
                    ]
                }
            ],
            '$archiver': 'NSKeyedArchiver',
            '$top': {
                'root': UID(1)
            }
        }
        self.assertEqual(plistlib.loads(TESTDATA["KEYED_ARCHIVE"]), data)
class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        # PlistFormat and PLISTHEADER are implementation details, not API.
        not_exported = {"PlistFormat", "PLISTHEADER"}
        support.check__all__(self, plistlib, not_exported=not_exported)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
# Ratings held in descending order.
my_list = [
    7,
    5,
    3,
    3,
    2
]

def insert_rating(ratings, score):
    """Insert `score` into the descending list `ratings` in place.

    BUG FIX: the original loop silently dropped a score smaller than every
    existing entry; the for/else now appends it at the end.
    """
    for index, value in enumerate(ratings):
        if value < score:
            ratings.insert(index, score)
            break
    else:
        ratings.append(score)

if __name__ == '__main__':
    # Prompt moved under the main guard so importing this module for testing
    # no longer blocks on input(); running it as a script is unchanged.
    newPoint = int(input('Введите новый балл для рейтинга: '))
    insert_rating(my_list, newPoint)
    print(my_list)
|
def func(inp):
    """Return True if `inp` is at most 10, else False.

    BUG FIX: the original ignored its parameter and read the module global
    `int_input` instead (working only by accident), and implicitly returned
    None for values above 10.  It now tests its own argument and always
    returns a bool; callers comparing `== True` behave exactly as before.
    """
    return inp <= 10
# Script driver (Python 2 syntax: raw_input and print statements).
user_input = raw_input("Enter a number between 0 and 10.\n")
int_input = int(user_input)
funkciq = func(int_input)
if funkciq == True:
    print "Great. This is:",funkciq
    print
else:
    # NOTE(review): the prompt says "between 0 and 10" but negative input is
    # not rejected — confirm whether a lower-bound check is wanted.
    print "%s is more than 10. This is False" % int_input
|
987,873 | 593c720decf9ae3e2b1de8cb18f1594f699cf65f | import pytest
from eth_utils.toolz import (
partial,
)
from web3._utils.blocks import (
select_method_for_block_identifier,
)
# Pre-bind the three dispatch outcomes so each test case only varies the
# block identifier being classified.
selector_fn = partial(
    select_method_for_block_identifier,
    if_hash="test_hash",
    if_number="test_number",
    if_predefined="test_predefined",
)
@pytest.mark.parametrize(
    "input,expected",
    (
        ("latest", "test_predefined"),
        ("pending", "test_predefined"),
        ("earliest", "test_predefined"),
        ("safe", "test_predefined"),
        ("finalized", "test_predefined"),
        (-1, ValueError),
        (0, "test_number"),
        (1, "test_number"),
        (4000000, "test_number"),
        ("0x0", "test_number"),
        ("0x00", "test_number"),
        ("0x1", "test_number"),
        ("0x01", "test_number"),
        (hex(4000000), "test_number"),
        ("0x" + "".zfill(64), "test_hash"),
    ),
)
def test_select_method_for_block_identifier(input, expected):
    """Each block identifier must dispatch to the matching handler or raise."""
    expects_error = isinstance(expected, type) and issubclass(expected, Exception)
    if expects_error:
        with pytest.raises(expected):
            selector_fn(input)
        return
    assert selector_fn(input) == expected
|
987,874 | 01d2ca7d0da61965cd3a43687916b367a05b852f | from django.contrib import admin
from .models import Manager,Roles,Property,Badges,Channel,ContactInformation,Location,Distances,Login,Registrations,AddOnes,ExternalCertificates,Gallery,Rents,Reviews,G_R_P_R,Add_External,M_B_R_C_R_L,L_C,P_E_R,P_D_L
# Register every model with the default admin site in one pass
# (same registration order as before).
_admin_models = (
    Manager, Roles, Property, Badges, Channel, ContactInformation,
    Location, Distances, Login, Registrations, AddOnes,
    ExternalCertificates, Gallery, Rents, Reviews, G_R_P_R, P_D_L,
    Add_External, L_C, P_E_R, M_B_R_C_R_L,
)
for _model in _admin_models:
    admin.site.register(_model)
|
987,875 | 469145af1461ae50cd598bb1acc57462d514099d | import json
from datetime import date
from django.core.serializers.json import DjangoJSONEncoder
from django.urls import reverse
from rest_framework.test import APITestCase, APIClient
from rest_framework.views import status
from urllib.parse import urlencode
from .models import Todos
from .serializers import TodosSerializer
# Fixed reference dates shared by the fixtures below.
date_today = date(2018, 12, 25)
date_yesterday= date(2018, 12, 24)
date_tomorrow = date(2018, 12, 26)
# Create your tests here.
# encode URL with a query string
# https://stackoverflow.com/questions/2778247/how-do-i-construct-a-django-reverse-url-using-query-args/5341769#5341769
def url_with_querystring(path, **kwargs):
    """Return *path* with *kwargs* appended as a URL-encoded query string."""
    query = urlencode(kwargs)
    return "{}?{}".format(path, query)
# tests for models
class TodosModelTest(APITestCase):
    # Model-level smoke test: one row, every field checked.
    def setUp(self):
        # "T" is the "to do" state value used throughout these tests.
        self.my_todo = Todos.objects.create(
            state="T",
            due_date=date_today,
            text="Call Mom"
        )
    def test_todo(self):
        """
        Ensure the todo created in setUp exists with the expected field
        values and string representation.
        """
        self.assertEqual(self.my_todo.state, "T")
        self.assertEqual(self.my_todo.due_date, date_today)
        self.assertEqual(self.my_todo.text, "Call Mom")
        self.assertEqual(str(self.my_todo), "Call Mom")
# tests for views
class BaseViewTest(APITestCase):
    """Shared fixtures and request helpers for the todos API tests."""
    client = APIClient()

    @staticmethod
    def create_todo(state="", due_date="", text=""):
        """Create a Todos row; silently skips when any field is blank."""
        if state != "" and due_date != "" and text != "":
            Todos.objects.create(state=state, due_date=due_date, text=text)

    def _send_json(self, http_method, url, data):
        # Single place for JSON serialization so POST and PUT stay in sync
        # (previously duplicated verbatim in both branches).
        return http_method(
            url,
            data=json.dumps(data, cls=DjangoJSONEncoder),
            content_type='application/json'
        )

    def make_a_request(self, kind="post", **kwargs):
        """
        Make a post request to create a todo
        :param kind: HTTP VERB ("post" or "put"; anything else -> None)
        :return: the DRF response, or None for an unknown verb
        """
        if kind == "post":
            url = reverse(
                "todos-list-create",
                kwargs={"version": kwargs["version"]}
            )
            return self._send_json(self.client.post, url, kwargs["data"])
        elif kind == "put":
            url = reverse(
                "todos-detail",
                kwargs={"version": kwargs["version"], "pk": kwargs["id"]}
            )
            return self._send_json(self.client.put, url, kwargs["data"])
        else:
            return None

    def _detail_url(self, pk):
        # v1 detail endpoint for a single todo.
        return reverse("todos-detail", kwargs={"version": "v1", "pk": pk})

    def fetch_a_todo(self, pk=0):
        """GET a single todo by primary key."""
        return self.client.get(self._detail_url(pk))

    def delete_a_todo(self, pk=0):
        """DELETE a single todo by primary key."""
        return self.client.delete(self._detail_url(pk))

    def setUp(self):
        # add test data: two todos per state, spread across three dates
        self.create_todo("T", date_tomorrow, "buy eggs")
        self.create_todo("I", date_today, "buy milk")
        self.create_todo("D", date_yesterday, "buy cheese")
        self.create_todo("T", date_tomorrow, "eat breakfast")
        self.create_todo("I", date_today, "eat dinner")
        self.create_todo("D", date_yesterday, "eat lunch")
        self.valid_data = {
            "state": "T",
            "due_date": "2018-12-25",
            "text": "test text"
        }
        self.invalid_data = {
            "state": "",
            "due_date": "",
            "text": ""
        }
        self.valid_todo_id = 1
        self.invalid_todo_id = 100
class GetAllTodosTest(BaseViewTest):
    def _assert_listing(self, url, queryset):
        # GET *url* and check the payload matches *queryset* exactly.
        response = self.client.get(url)
        serialized = TodosSerializer(queryset, many=True)
        self.assertEqual(response.data, serialized.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_get_all_todos(self):
        """
        This test ensures that all todos added in the setUp method exist
        when we make a GET request to the todos/ endpoint, and that state
        filtering and due-date ordering both work.

        The six near-identical request/serialize/assert stanzas of the
        original are collapsed into the _assert_listing helper.
        """
        list_url = reverse("todos-list-create", kwargs={"version": "v1"})
        # unfiltered listing
        self._assert_listing(list_url, Todos.objects.all())
        # filtering by each state: to do / in progress / done
        for state in ("T", "I", "D"):
            self._assert_listing(
                url_with_querystring(list_url, state=state),
                Todos.objects.filter(state=state),
            )
        # ordering by ascending and descending due date
        for ordering in ("due_date", "-due_date"):
            self._assert_listing(
                url_with_querystring(list_url, ordering=ordering),
                Todos.objects.order_by(ordering),
            )
class GetASingleTodosTest(BaseViewTest):
    def test_get_a_todo(self):
        """Fetching by id returns the serialized todo; unknown ids 404."""
        # existing todo
        response = self.fetch_a_todo(self.valid_todo_id)
        db_todo = Todos.objects.get(pk=self.valid_todo_id)
        self.assertEqual(response.data, TodosSerializer(db_todo).data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # missing todo
        response = self.fetch_a_todo(self.invalid_todo_id)
        self.assertEqual(
            response.data["message"],
            "TODO with ID: 100 does not exist"
        )
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class AddTodosTest(BaseViewTest):
    def test_create_a_todo(self):
        """POSTing valid data creates a todo; blank fields are rejected."""
        valid_response = self.make_a_request(
            kind="post", version="v1", data=self.valid_data
        )
        self.assertEqual(valid_response.data, self.valid_data)
        self.assertEqual(valid_response.status_code, status.HTTP_201_CREATED)
        invalid_response = self.make_a_request(
            kind="post", version="v1", data=self.invalid_data
        )
        self.assertEqual(
            invalid_response.data["message"],
            "TODO item requires state, due_date and text"
        )
        self.assertEqual(
            invalid_response.status_code, status.HTTP_400_BAD_REQUEST
        )
class UpdateTodosTest(BaseViewTest):
    def test_update_a_todo(self):
        """
        PUT with valid data updates todo #2; PUT with blank fields on
        todo #3 is rejected with a 400 and an explanatory message.
        """
        ok_response = self.make_a_request(
            kind="put", version="v1", id=2, data=self.valid_data
        )
        self.assertEqual(ok_response.data, self.valid_data)
        self.assertEqual(ok_response.status_code, status.HTTP_200_OK)
        bad_response = self.make_a_request(
            kind="put", version="v1", id=3, data=self.invalid_data
        )
        self.assertEqual(
            bad_response.data["message"],
            "TODO item requires state, due_date and text"
        )
        self.assertEqual(bad_response.status_code, status.HTTP_400_BAD_REQUEST)
class DeleteTodosTest(BaseViewTest):
    def test_delete_a_todo(self):
        """DELETE removes an existing todo; unknown ids 404."""
        self.assertEqual(
            self.delete_a_todo(1).status_code, status.HTTP_204_NO_CONTENT
        )
        self.assertEqual(
            self.delete_a_todo(100).status_code, status.HTTP_404_NOT_FOUND
        )
|
987,876 | e39e34522bdc2b430a3803105d76ef03e40b2098 | from django.shortcuts import render
from django.http import HttpResponse
from books.models import Book
from myfirstsite.forms import ContactForm
from django.http import HttpResponseRedirect
from django.core.mail import send_mail
# Create your views here.
def search_form(request):
    """Render the empty book-search form."""
    return render(request, 'search_form.html')
def search(request):
    """
    Render search results for the ``q`` GET parameter.

    Falls back to the search form with an error flag when ``q`` is
    missing or empty.
    """
    # Single dict lookup instead of the membership-test-plus-index pair.
    q = request.GET.get('q')
    if q:
        books = Book.objects.filter(title__icontains=q)
        return render(request, 'search_results.html', {'books': books, 'query': q})
    return render(request, 'search_form.html', {'error': True})
|
987,877 | f09e2d33bf46360285f9a60566eddf96487f9802 | bind = ['0.0.0.0:80']
logconfig = 'config/logging.conf'  # Gunicorn logging configuration file
|
987,878 | a14e015ceab743e7a2e4ef8aaebd36e4ad8f34e3 | import sys
# Read a phone book from data.txt: the first line holds the entry count,
# followed by "<name> <number>" lines, then lookup queries.
# BUG FIX: the original referenced undefined names (a, b), shadowed the
# ``dict`` builtin and called ``dict.keys`` without parentheses, so it
# crashed with a NameError before producing any output.
with open("data.txt", "r") as data_file:
    entry_count = int(data_file.readline())
    phone_map = {}
    for _ in range(entry_count):
        name, phone = data_file.readline().split()
        phone_map[name] = phone
    # Remaining lines are lookup queries.
    for raw_query in data_file:
        query = raw_query.strip()
        if not query:
            continue
        if query in phone_map:
            print(str(query) + "=" + str(phone_map[query]))
        else:
            print("Not found")
# Same phone-book exercise, reading from standard input: n entries of
# "<name> <number>", then lookup queries until EOF.
n = int(input())
name_numbers = [input().split() for _ in range(n)]
phone_book = dict(name_numbers)
while True:
    try:
        name = input()
        if name in phone_book:
            print('%s=%s' % (name, phone_book[name]))
        else:
            print('Not found')
    except:
        # End of input (EOFError) terminates the query loop.
        break
987,879 | f177fb04fff0c002f98b9ec5a6dea8c455a43b5e | ii = [('RogePAV2.py', 45), ('RogePAV.py', 8), ('RennJIT.py', 9), ('WestJIT2.py', 11), ('KirbWPW2.py', 8), ('WestJIT.py', 6), ('BellCHM.py', 1)] |
987,880 | 818c38e9be44c351a07457c26bd8f3ac54cf5edc | from data_importer.importers import CSVImporter
from core.models import .
class CSVImporterHistory(CSVImporter):
class Meta:
delimiter = ";"
model = History |
987,881 | fe65f53bb7a723c508eae381e366234eca1680c9 | #!/usr/bin/env python3.6
import sys
import math
import random
import pandas as pd
def dataFrameNames():
    """
    Column names for the coincidence data frame.

    Columns 1-8: detection x/y/z [cm] and time [ps] for gamma 1 and 2;
    vol1/vol2: detector volume IDs; e1/e2: energy lost during detection
    [keV]; class: coincidence type (1-true, 2-phantom-scattered,
    3-detector-scattered, 4-accidental); sX1/sY1/sZ1: gamma 1 emission
    position [cm].
    """
    return [
        "x1", "y1", "z1", "t1",
        "x2", "y2", "z2", "t2",
        "vol1", "vol2",
        "e1", "e2",
        "class",
        "sX1", "sY1", "sZ1",
    ]
def emissionPoint(row):
    """
    Reconstruct the emission point [cm] on the line of response from the
    two detection points and the detection-time difference row['dt'] [ps].
    """
    speed_of_light = 0.03  # cm/ps
    half_x = (row['x1'] - row['x2'])/2
    half_y = (row['y1'] - row['y2'])/2
    half_z = (row['z1'] - row['z2'])/2
    half_length = math.sqrt(half_x**2 + half_y**2 + half_z**2)
    # Unit vector along the LOR, scaled by half the distance light
    # travels during dt.
    direction = (half_x/half_length, half_y/half_length, half_z/half_length)
    offsets = [row['dt']*speed_of_light*component/2 for component in direction]
    midpoint = (
        (row['x1'] + row['x2'])/2,
        (row['y1'] + row['y2'])/2,
        (row['z1'] + row['z2'])/2,
    )
    return (
        midpoint[0] - offsets[0],
        midpoint[1] - offsets[1],
        midpoint[2] - offsets[2],
    )
def distance(row):
    """Euclidean distance between the true (sX1/sY1/sZ1) and the
    reconstructed (RX1/RY1/RZ1) emission points."""
    dx = row['sX1'] - row['RX1']
    dy = row['sY1'] - row['RY1']
    dz = row['sZ1'] - row['RZ1']
    return math.sqrt(dx**2 + dy**2 + dz**2)
def reClass(row):
    """
    Reclassify phantom-scattered events (class 2) as true (class 1) when
    the reconstructed emission point lies within 0.05 cm of the truth.
    """
    if row['class'] == 2 and row['emissionDistance'] < 0.05:
        return 1
    return row['class']
def shuffleTheOrder(row):
rowCopy = row.copy()
if bool(random.getrandbits(1)):
rowCopy['x1'] = row['x2']
rowCopy['y1'] = row['y2']
rowCopy['z1'] = row['z2']
rowCopy['e1'] = row['e2']
rowCopy['t1'] = row['t2']
rowCopy['vol1'] = row['vol2']
rowCopy['x2'] = row['x1']
rowCopy['y2'] = row['y1']
rowCopy['z2'] = row['z1']
rowCopy['e2'] = row['e1']
rowCopy['t2'] = row['t1']
rowCopy['vol2'] = row['vol1']
rowCopy['dt'] = -1.0*row['dt']
return rowCopy
def main(argv):
    """
    Repair one part-file of NEMA IQ coincidences: recompute dt,
    reconstruct the emission point, reclassify near-true scattered
    events, randomly shuffle the hit order, then write the result back
    out tab-separated with no header/index.

    argv[1]: output directory; argv[2]: part suffix of the input file.
    """
    pathToDataLoad = '/mnt/opt/groups/jpet/NEMA_Image_Quality/3000s/'
    # pathToDataLoad = '/home/krzemien/workdir/pet/classification/data/'
    pathToDataSave = argv[1]
    fileName = 'NEMA_IQ_384str_N0_1000_COINCIDENCES_'
    part = argv[2]
    print("Processing file " + fileName + 'REPAIRED_' + "part" + part)
    data = pd.read_csv(
        pathToDataLoad + fileName + "part" + part,
        sep = "\t",
        names=dataFrameNames()
    )
    # Detection-time difference [ps] between the two gammas.
    data['dt'] = data.apply (lambda row: row['t1'] - row['t2'], axis=1)
    # Reconstructed emission point along the line of response.
    data[['RX1','RY1','RZ1']] = data.apply(lambda row: pd.Series(emissionPoint(row)), axis=1)
    data['emissionDistance'] = data.apply(lambda row:distance(row), axis=1)
    data['class'] = data.apply(lambda row:reClass(row), axis = 1)
    data = data.apply(lambda row:shuffleTheOrder(row), axis = 1)
    # Drop the helper columns so the output schema matches the input.
    data = data.drop(['dt', 'RX1', 'RY1','RZ1', 'emissionDistance'], axis = 1)
    data.to_csv(
        pathToDataSave + fileName + 'REPAIRED_' + "part" + part,
        header=False, index=False, sep='\t'
    )
|
987,882 | 4601b3cac5c46657ce5be0e4bb27d27b51ebd0aa | from __future__ import print_function
from collections import deque
from Symboltable import *
from io import StringIO
import sys
import tokenize
import re
class Scope:
    """Runtime scope: variable bindings plus call/parameter metadata."""

    def __init__(self):
        self.variables_scope = {}  # variable name -> current value
        self.name = ""             # scope label ("global", function name, ...)
        self.param = 0             # index of the next parameter to bind
        self.num_param = 0         # total number of declared parameters
def get_input(*args, **kw):
    """Read a string from standard input.

    Dispatches to raw_input on Python 2 and input on Python 3; the
    raw_input name is only referenced inside the version branch so the
    function also loads cleanly under Python 3.
    """
    if sys.version[0] == "2":
        return raw_input(*args, **kw)
    else:
        return input(*args, **kw)
class Stack(deque):
    """A deque used as a LIFO stack with push/top conveniences."""

    # Pushing is simply appending on the right end of the deque.
    push = deque.append

    def top(self):
        """Return the element on top of the stack without popping it."""
        last_index = len(self) - 1
        return self[last_index]
class switch(object):
    """
    C-style switch helper: ``for case in switch(value): if case(x): ...``.

    BUG FIX: the generator previously ended with ``raise StopIteration``,
    which PEP 479 (Python 3.7+) converts into a RuntimeError whenever a
    loop body falls through without breaking; a plain return ends the
    generator safely.
    """
    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop."""
        yield self.match
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        if self.fall or not args:
            # No args means the default case; fall=True means a previous
            # case matched and we are falling through.
            return True
        elif self.value in args:  # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
class Machine:
def __init__(self, code):
self.data_stack = Stack()
self.return_stack = Stack()
self.modulo_stack = Stack()
self.scope_stack = Stack()
self.llamada_prep = Stack()
self.instruction_pointer = 0
self.code = code
self.param = 0
self.table = SymbolTable(None, 'global')
self.operations = {
"%": self.mod,
"*": self.mul,
"+": self.plus,
"-": self.minus,
"/": self.div,
"==": self.condicion,
"=": self.asigna,
"<": self.condicion,
">": self.condicion,
"&&": self.condicion,
"||": self.condicion,
"<=": self.condicion,
">=": self.condicion,
}
self.modulos = {
"programa": self.programa,
"principal": self.principal,
"modulo": self.modulo,
}
self.variables = {
"param": self.parametro,
"global": self.global1,
"local": self.local,
}
self.dis_map = {
"regresa": self.regresa,
"principal": self.principal,
"gotof": self.si,
"end_si": self.end_si,
"repite": self.repite,
"hasta": self.hasta,
"mientras": self.mientras,
"end_mientras": self.end_mientras,
"llamada": self.llamada,
"escribe": self.print,
"leer": self.read,
"jmp": self.jmp,
"goto": self.goto,
"gosub": self.gosub,
"over": self.over,
"println": self.println,
"stack": self.dump_stack,
"exit": self.exit,
}
def pop(self):
return self.data_stack.pop()
def push(self, value):
self.data_stack.push(value)
def top(self):
return self.data_stack.top()
def run(self):
while self.instruction_pointer < len(self.code):
opcode = self.code[self.instruction_pointer]
self.instruction_pointer += 1
self.dispatch(opcode, self.table)
def dispatch(self, quadruplo, table):
# print(quadruplo)
# quad1 = quadruplo.split(",")
# quad = []
# for item in quad1:
# term = re.sub(r'["|\[|\]|]', '', item)
# quad = quad + [term,]
# op = quad[0]
# print(quad)
# print(self.instruction_pointer)
op = quadruplo[0]
print("dispatch", self.instruction_pointer, quadruplo)
if op in self.operations:
self.operations[op](quadruplo, table)
elif op in self.modulos:
self.modulos[op](quadruplo, table)
elif op in self.variables:
self.variables[op](quadruplo, table)
elif op in self.dis_map:
self.dis_map[op](quadruplo, table)
else:
raise RuntimeError("Unknown opcode: '%s'" % op)
def programa(self, quadruplo, table):
scope = Scope()
scope.name ="global"
self.scope_stack.push(scope)
def global1(self, quadruplo, table):
varglobal, tipo, var = quadruplo
self.scope_stack.top().variables_scope[var]=None
table.put(VariableSymbol(var, tipo))
def modulo(self,quadruplo, table):
# print(quadruplo)
self.scope_stack.top().param = self.scope_stack.top().num_param
# print(self.scope_stack.top().num_param)
mod, tipo, nombre = quadruplo
# funcSymbol = FunctionSymbol(nombre, tipo, [], [], self.instruction_pointer)
# print(funcSymbol.pointer)
# self.modulo_stack.push(funcSymbol)
def parametro(self,quadruplo, table):
if self.llamada_prep:
parame, var = quadruplo
self.llamada_prep.top().variables_scope["param"+str(self.llamada_prep.top().param)] = self.buscaVariable(var)
self.llamada_prep.top().param = self.llamada_prep.top().param-1
else:
parame,tipo, var = quadruplo
var1 = self.scope_stack.top().variables_scope["param"+str(self.scope_stack.top().param)]
self.scope_stack.top().variables_scope[var]=var1
self.scope_stack.top().param = self.scope_stack.top().param-1
def local(self, quadruplo, table):
local, tipolocal, varlocal = quadruplo
self.scope_stack.top().variables_scope[varlocal]=None
table.put(VariableSymbol(varlocal, tipolocal))
def regresa(self, quadruplo, table):
print("regresa quad", self.instruction_pointer, quadruplo)
# print("regresa vars_stock",self.instruction_pointer, self.scope_stack.top().variables_scope)
if len(quadruplo) == 2:
regresa, var = quadruplo
else:
regresa = quadruplo
varx = self.buscaVariable(var)
# print("regresa var", self.instruction_pointer,var)
# print("regresa varx", self.instruction_pointer,varx)
print("regresa varx", self.instruction_pointer,varx)
variable_regresa = self.return_stack.top()["return_variable"]
self.return_stack.top()[variable_regresa]=varx
self.instruction_pointer = self.return_stack.top()["pointer"]
# print("regresa pointer", self.return_stack.top()["pointer"])
# print("regresa vars_stock",self.instruction_pointer, self.scope_stack.top().variables_scope)
# print("regresa vars_return", self.return_stack.top())
self.return_stack.pop()
self.scope_stack.pop()
self.scope_stack.top().variables_scope[variable_regresa]=varx
def goto(self, quadruplo, table):
got, line = quadruplo
if isinstance(line, int) and 0 <= line < len(self.code):
self.instruction_pointer = line
else:
raise RuntimeError("GOTO address must be a valid integer.")
def gosub(self, quadruplo, table):
goto, var, line = quadruplo
returnS = {}
returnS[var]=None
returnS["return_variable"]=var
returnS["pointer"]=self.instruction_pointer
if isinstance(line, int) and 0 <= line < len(self.code):
self.instruction_pointer = line
self.return_stack.push(returnS)
if self.llamada_prep:
self.scope_stack.push(self.llamada_prep.top())
self.llamada_prep.pop()
print("goto",self.scope_stack.top().name)
else:
raise RuntimeError("GOTO address must be a valid integer.")
def principal(self,quadruplo, table):
principal = Scope()
principal.name = "principal"
self.scope_stack.push(principal)
def si(self, quadruplo, table):
# print(quadruplo)
# print("si",self.scope_stack.top().variables_scope)
si, var, num = quadruplo
# print(num)
varx = self.buscaVariable(var)
# print(" - type %s, value '%s'" % (type(varx), varx))
if not varx:
self.instruction_pointer=num
def end_si(self):
pass
def repite(self):
pass
def hasta(self):
pass
def mientras(self):
pass
def end_mientras(self):
pass
def llamada(self, quadruplo, table):
tipo, name, num = quadruplo
scope_llamada = Scope()
scope_llamada.name = name
scope_llamada.param = num
scope_llamada.num_param = num
self.llamada_prep.push(scope_llamada)
# param = self.code[self.instruction_pointer:self.instruction_pointer+num]
# for item in param:
# var = self.buscaVariable(item[1])
# scope_llamada.variables_scope[item[1]]=var
# print("llamada", self.instruction_pointer+num+1)
# self.instruction_pointer = self.instruction_pointer+num
def print(self):
pass
def read(self):
pass
def asigna(self, quadruplo, table):
# print("asigna", quadruplo)
igual, res, vac, var = quadruplo
result = self.buscaVariable(res)
try:
self.scope_stack.top().variables_scope[var]=result
except:
self.scope_stack[0].variables_scope[var]=result
def buscaVariable(self,var):
try:
x = self.scope_stack.top().variables_scope[var]
return x
except:
try:
x = self.scope_stack[0].variables_scope[var]
return x
except:
return var
def getValores(self, var1, var2, table):
if not table.get(var1) and not table.get(var2):
return self.buscaVariable(var1), self.buscaVariable(var2)
elif table.get(var1) and not table.get(var2):
var = self.buscaVariable(var1)
if var:
return var,var2
else:
print("Variable sin asignar " + str(var1) + " en la linea " + str(self.instruction_pointer))
sys.exit(0)
elif not table.get(var1) and table.get(var2):
var = self.buscaVariable(var2)
if var:
return var1,var
else:
print("Variable sin asignar " + str(var2) + " en la linea " + str(self.instruction_pointer))
sys.exit(0)
var = self.buscaVariable(var2)
elif table.get(var1) and table.get(var2):
var = self.buscaVariable(var1)
varx = self.buscaVariable(var2)
if var:
return var,varx
else:
print("Variable sin asignar " + str(var1) + " en la linea " + str(self.instruction_pointer))
print("Variable sin asignar " + str(var2) + " en la linea " + str(self.instruction_pointer))
sys.exit(0)
def plus(self, quadruplo, table):
op, oper1, oper2, var = quadruplo
# operando1, operando2 = self.getValores(oper1, oper2, table)
operando1 = self.buscaVariable(oper1)
operando2 = self.buscaVariable(oper2)
# print("mas", operando1)
# print("mas", operando2)
resultado = operando1+operando2
self.scope_stack.top().variables_scope[var]=resultado
#self.push(self.pop() + self.pop())
def exit(self):
sys.exit(0)
def minus(self, quadruplo, table):
op, oper1, oper2, var = quadruplo
# operando1, operando2 = self.getValores(oper1, oper2, table)
# print(self.scope_stack.top().variables_scope)
operando1 = self.buscaVariable(oper1)
operando2 = self.buscaVariable(oper2)
# print("menos", operando1)
# print("menos", operando2)
resultado = operando1-operando2
self.scope_stack.top().variables_scope[var]=resultado
def mul(self, quadruplo, table):
# print(self.scope_stack.top().variables_scope)
op, oper1, oper2, var = quadruplo
# operando1, operando2 = self.getValores(oper1, oper2, table)
operando1 = self.buscaVariable(oper1)
operando2 = self.buscaVariable(oper2)
# print("mul", operando1)
# print("mul", operando2)
resultado = operando1*operando2
self.scope_stack.top().variables_scope[var]=resultado
def div(self, quadruplo, table):
op, oper1, oper2, var = quadruplo
# operando1, operando2 = self.getValores(oper1, oper2, table)
operando1 = self.buscaVariable(oper1)
operando2 = self.buscaVariable(oper2)
# print("div", operando1)
# print("div", operando2)
resultado = operando1/operando2
self.scope_stack.top().variables_scope[var]=resultado
def mod(self, quadruplo, table):
pass
# last = self.pop()
# self.push(self.pop() % last)
def condicion(self, quadruplo, table):
op, oper1, oper2, var = quadruplo
# operando1, operando2 = self.getValores(oper1, oper2, table)
operando1 = self.buscaVariable(oper1)
operando2 = self.buscaVariable(oper2)
# print("cond", operando1)
# print("cond", operando2)
for case in switch(op):
if case('<'):
if operando1 < operando2:
self.scope_stack.top().variables_scope[var]=True
else:
self.scope_stack.top().variables_scope[var]=False
break
if case('>'):
if operando1 > operando2:
self.scope_stack.top().variables_scope[var]=True
else:
self.scope_stack.top().variables_scope[var]=False
break
if case('<='):
if operando1 <= operando2:
self.scope_stack.top().variables_scope[var]=True
else:
self.scope_stack.top().variables_scope[var]=False
break
if case('>='):
if operando1 >= operando2:
self.scope_stack.top().variables_scope[var]=True
else:
self.scope_stack.top().variables_scope[var]=False
break
if case('=='):
if operando1 == operando2:
self.scope_stack.top().variables_scope[var]=True
else:
self.scope_stack.top().variables_scope[var]=False
break
if case('!='):
if operando1 != operando2:
self.scope_stack.top().variables_scope[var]=True
else:
self.scope_stack.top().variables_scope[var]=False
break
if case('&&'):
if operando1 and operando2:
self.scope_stack.top().variables_scope[var]=True
else:
self.scope_stack.top().variables_scope[var]=False
break
if case('||'):
if operando1 or operando2:
self.scope_stack.top().variables_scope[var]=True
else:
self.scope_stack.top().variables_scope[var]=False
break
if case():
print("something else!")
def dup(self):
self.push(self.top())
def over(self):
b = self.pop()
a = self.pop()
self.push(a)
self.push(b)
self.push(a)
def drop(self):
self.pop()
def swap(self):
b = self.pop()
a = self.pop()
self.push(b)
self.push(a)
def print(self,quadruplo, table):
escribe, num = quadruplo
param = self.code[self.instruction_pointer:self.instruction_pointer+num]
for item in param:
var = self.buscaVariable(item[1])
sys.stdout.write("escribe: %s\n" % str(var))
sys.stdout.flush()
self.instruction_pointer = self.instruction_pointer+num
def println(self):
sys.stdout.write("%s\n" % self.pop())
sys.stdout.flush()
def read(self):
self.push(get_input())
def cast_int(self):
self.push(int(self.pop()))
def cast_str(self):
self.push(str(self.pop()))
def eq(self):
# self.push(self.pop() == self.pop())
pass
def if_stmt(self):
false_clause = self.pop()
true_clause = self.pop()
test = self.pop()
self.push(true_clause if test else false_clause)
def jmp(self):
addr = self.pop()
if isinstance(addr, int) and 0 <= addr < len(self.code):
self.instruction_pointer = addr
else:
raise RuntimeError("JMP address must be a valid integer.")
def dump_stack(self):
print("\nstack", self.scope_stack.top().variables_scope)
print("\nData stack (top first):")
for v in reversed(self.data_stack):
print(" - type %s, value '%s'" % (type(v), v)) |
987,883 | 7cc3f6d5255a514bacd6691786f810e00e0f7bcf | from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# Point Celery at the Django settings module so it reuses the project config.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shop.settings')
app = Celery('shop')
# namespace='CELERY': read every settings key that starts with "CELERY".
app.config_from_object('django.conf:settings', namespace='CELERY')
# Auto-discover tasks in every installed Django app's tasks module.
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
    """Print the raw task request (sanity check that the worker runs)."""
    print('Request: {0!r}'.format(self.request))
987,884 | 73447c4722488f60715400b46299d9fc1685169b | # Generated by Django 2.1.7 on 2020-05-13 07:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: makes LastLogin.current and LastLogin.previous nullable.
    dependencies = [
        ('taskmanager', '0014_auto_20200513_0700'),
    ]
    operations = [
        migrations.AlterField(
            model_name='lastlogin',
            name='current',
            field=models.DateTimeField(null=True),
        ),
        migrations.AlterField(
            model_name='lastlogin',
            name='previous',
            field=models.DateTimeField(null=True),
        ),
    ]
|
987,885 | 873b3fc986955037ad1875b7ba4b4bf64715286e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 19 12:23:22 2019
@author: thomas
"""
import matplotlib.pyplot as plt
import numpy as np
font = {'family' : 'serif',
'weight' : 'normal',
'size' : 16}
plt.rc('font', **font)
# time axis
Tmax=3.0
N=1024
n=np.arange(-N/2.0,N/2.0,1)
Dt=2*Tmax/N
t = n*Dt
# frequency axis
Df=1.0/2.0/Tmax
f=n*Df
# frequency response
f3dB=5.0
H=1.0/(1j*f/f3dB+1)
# number of iterations
Nit=1000
YY=np.zeros([N,N])
# Monte-Carlo loop: filter white Gaussian noise through H and accumulate
# the element-wise products y_i*y_j (outer product of y with itself).
for m in range(0,Nit):
    x=np.random.randn(N)
    # calculate spectrum at input
    Xf=Dt*np.fft.fftshift(np.fft.fft(np.fft.fftshift(x)))
    # Multiply with transfer function
    Yf=Xf*H
    # Back to the time domain
    y0=1.0/Dt*np.fft.fftshift(np.fft.ifft(np.fft.fftshift(Yf)))
    # NOTE(review): np.asmatrix is deprecated in modern NumPy -- consider
    # np.outer(y0, y0) instead; verify before changing.
    y=np.asmatrix(y0)
    yt=np.transpose(y)
    Y=np.multiply(yt,y)
    YY=Y+YY
# Average over the Nit noise realizations.
YY=1.0/Nit*YY
# calculate autocorrelation
Rnn=np.array(YY[int(N/2)])[0]
# Calculate power spectral density
Sn=Dt*np.fft.fftshift(np.fft.fft(np.fft.fftshift(Rnn)))
plt.figure(1)
plt.plot(t,np.real(y0))
plt.xlim([-0.5,0.5])
plt.ylabel('$N_k(t)$')
plt.xlabel('$t$')
plt.tight_layout()
plt.savefig('outputsystem.png')
plt.figure(2)
plt.plot(t,np.real(Rnn))
plt.xlim([-0.5,0.5])
plt.grid()
plt.xlabel('$\\tau$')
plt.ylabel('$R_{NN}( \\tau )$')
plt.tight_layout()
plt.savefig('outputRXX.png')
plt.figure(3)
plt.plot(f,np.abs(Sn),label='Numerical')
plt.plot(f,Dt*np.abs(H)*np.abs(H),label='Analytical',linewidth=3.0)
plt.xlim([-10.0,10.0])
plt.grid()
plt.xlabel('$f$')
plt.ylabel('$S_{N}(f)$')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
plt.tight_layout()
plt.savefig('outputSn.png')
|
987,886 | 0af8c28451a502ef78d5b7d0ad7443e8de5fee34 | import socket
import sys
import hashlib # used to compute checksum of files
from PIL import Image # used to display images
# function returns checksum of a given file
def getChecksum(filename):
    """Return the hexadecimal MD5 digest of the file's contents."""
    with open(filename, "rb") as handle:
        digest = hashlib.md5(handle.read()).hexdigest()
    return digest
# main function to connect to socket and send and recieve files
def Main(csocket):
    """Client entry point: connect to the server and run one transfer mode.

    Command-line contract (sys.argv): [1] server host, [2] base port,
    [3] optional mode ("put" / "get" / "list"), [4] file name for put/get.
    When argv[3] is absent, the IndexError is caught by the broad except
    below and the client falls back to interactive chat mode.

    :param csocket: int, TCP port to connect to (incremented on reconnect).
    """
    # connect to server socket
    cli_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    cli_sock.connect((sys.argv[1], csocket))
    print("Connected - ")
    # check if there is a third arguement
    # NOTE(review): this bare try/except also swallows any real error raised
    # inside the put/get/list branches, silently dropping into chat mode.
    try:
        if sys.argv[3] == "put":
            # code for client to upload a file to server
            # start by sending server "put" so the server knows what the client wants to do
            s = "put"
            cli_sock.sendall(s.encode('utf-8'))
            # send server the calculated checksum of the file the client wants to upload
            filename = sys.argv[4]
            checksum = getChecksum(filename)
            cli_sock.sendall(checksum.encode('utf-8'))
            # open binary version of file and send data over
            f = open(filename,"rb")
            bits = f.read(1024)
            while (bits):
                cli_sock.sendall(bits)
                print("Sending data...")
                bits = f.read(1024)
            f.close()
            # file finishes uploading
            print("File "+ filename + " sent")
            cli_sock.close()
        elif sys.argv[3]=="get":
            # code for client to downlaod a file to server
            # client decides name of new file, will not allow client to
            # overwrite a file which already exists
            done = False
            while(not done):
                userFilename = input("What would you like to call the file?: ")
                try:
                    f = open(userFilename)
                    f.close()
                    print("File already exists, choose another file name.")
                except FileNotFoundError:
                    done = True
            # code for client to download file from server
            # start by sending sever "get" so the server knowns that the client wants to do
            s = "get"
            cli_sock.sendall(s.encode('utf-8'))
            cli_sock.recv(1024).decode('utf-8')
            # send to server name of file the client wants to download
            filename = sys.argv[4]
            cli_sock.sendall(filename.encode('utf-8'))
            # recieve checksum for that file from server
            srvChecksum = cli_sock.recv(1024).decode('utf-8')
            # open a new file in binary mode to write data
            with open(userFilename, "wb") as f:
                while True:
                    print("Receiving data...")
                    data = cli_sock.recv(1024)
                    if not data:
                        break
                    f.write(data)
                    print("Writing")
                f.close()  # redundant: the with-statement already closes f
            # all data has now been downloaded and written to file
            # computes checksum of the file it has created
            cliChecksum = getChecksum(userFilename)
            # checks the checksum received earlier from server agaisnt the
            # checksum of the file that data has been written to
            if srvChecksum == cliChecksum:
                print("Successfully recieved and written to file")
                try:
                    # if the file that was downlaoded from the server was a picture
                    img = Image.open(userFilename)
                    imgOpen = input("Do you want to open the image that you have been sent? (y/n): ")
                    if imgOpen == "y":
                        # open uploaded picture
                        img.show()
                    elif imgOpen == "n":
                        print("Okay, closing connection.")
                except:
                    pass
                cli_sock.close()
            else:
                # if the checksums don't match
                print("WARNING: some file data lost")
                cli_sock.close()
        elif sys.argv[3] == "list":
            # code for if client wants to see a list of possible files server can send
            fileList = []
            s = "list"
            cli_sock.sendall(s.encode('utf-8'))
            # loop to recieve and add each file name to an array as they come
            while (True):
                file = cli_sock.recv(1024).decode('utf-8')
                if file == "done":
                    break
                else:
                    fileList.append(file)
            # display all file names client received
            for i in fileList:
                print(i)
            cli_sock.close()
    # if there is no third arguement ie client doesn't want to upload or download files
    except:
        # start by sending "notfile" so server knows what client wants to do
        s = "notfile"
        cli_sock.sendall(s.encode('utf-8'))
        # send client's name to server and receive server's name
        name = input("Name: ")
        print("Waiting for connection")
        srv_name = cli_sock.recv(1024).decode('utf-8')
        print(srv_name + " has connected to chat")
        cli_sock.sendall(name.encode('utf-8'))
        # loop to recieve and send messages
        done = True
        while (done == True):
            try:
                request = cli_sock.recv(1024)
                print(srv_name +": " + request.decode('utf-8'))
                message = input("Me: ")
                if message == "QUIT":
                    cli_sock.close()
                else:
                    cli_sock.sendall(message.encode('utf-8'))
            # if the socket has disconnected
            except:
                userAnswer = input("The socket has disconnected, would you like to reconnect? (y/n): ")
                if userAnswer == "y":
                    # connect to a new socket with the last port number +1
                    Main(int(csocket)+1)
                elif userAnswer == "n":
                    print("Okay, closing socket")
                    cli_sock.close()
                    done = False
# calls main function to start program
Main(int(sys.argv[2]))
|
987,887 | 814221e98d5e65c2e1b572f46649600f5dfe5c94 | s = "Salam Almaty. We are are from Dushanbe. Aga"
myDict = {}
def isGoodWord(word):
    """Return True if *word* has strictly more vowels than non-vowels.

    Only lower-case a/e/i/o/u count as vowels, matching the caller which
    lower-cases every word before the check. The empty string is not
    "good" (0 vowels is not greater than 0 consonants).
    """
    numOfvowels = sum(1 for ch in word if ch in "aeiou")
    return numOfvowels > len(word) - numOfvowels
def checkEachWord(text):
    """Tally every vowel-heavy word of *text* (case-insensitive) in myDict."""
    for token in text.split():
        word = token.lower()
        if isGoodWord(word):
            myDict[word] = myDict.get(word, 0) + 1
checkEachWord(s)
print(myDict) |
987,888 | f2108b73858a8aca43e7a1f8360abbb9a73fd7c9 | """
Contains relevant methods for removing stopwords during preprocessing phase
"""
stopwords_list = {'i', 'me', 'my', 'myself', 'we', 'us', 'our', 'ours', 'ourselves', 'you', 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'would', 'should', 'could', 'ought', "i'm", "you're", "he's", "she's", "it's", "we're", "they're", "i've", "you've", "we've", "they've", "i'd", "you'd", "he'd", "she'd", "we'd", "they'd", "i'll", "you'll", "he'll", "she'll", "we'll", "they'll", "isn't", "aren't", "wasn't", "weren't", "hasn't",
"haven't", "hadn't", "doesn't", "don't", "didn't", "won't", "wouldn't", "shan't", "shouldn't", "can't", 'cannot', "couldn't", "mustn't", "let's", "that's", "who's", "what's", "here's", "there's", "when's", "where's", "why's", "how's", 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very'}
def remove_stopwords(text):
    """Return *text* (an iterable of tokens) with all stopwords filtered out.

    Membership is tested against the module-level ``stopwords_list`` set,
    so the filter costs O(1) per token.
    """
    return [token for token in text if token not in stopwords_list]
if __name__ == "__main__":
    # building stopwords_list from stopwords.txt: keep the first token of
    # every non-empty line whose first character is not '|' (comment marker).
    # The original leaked the file handle; a with-statement closes it.
    with open("stopwords.txt", "r") as f:
        stopwords_list = []
        for line in f:
            line = line.strip()
            if len(line) > 0 and line[0] != '|':
                stopwords_list.append(line.split()[0])
    print(stopwords_list)
|
987,889 | ec1ac472d4cd3521755a26d8207b15c77bf7103a | ## 3.Реализовать базовый класс Worker (работник), в котором определить атрибуты:
## name, surname, position (должность), income (доход).
# Последний атрибут должен быть защищенным и ссылаться на словарь, содержащий элементы:
# оклад и премия, например, {"wage": wage, "bonus": bonus}.
## Создать класс Position (должность) на базе класса Worker.
# В классе Position реализовать методы получения полного
# имени сотрудника (get_full_name) и дохода с учетом премии (get_total_income).
# Проверить работу примера на реальных данных (создать экземпляры класса Position,
# передать данные, проверить значения атрибутов, вызвать методы экземпляров).
class Worker:
    """Base employee record.

    Attributes:
        name: first name.
        surname: last name.
        position: job title.
        _income: protected pay dict, e.g. {"wage": wage, "bonus": bonus}.
    """
    def __init__(self, name, surname, position, income):
        self.name = name
        self.surname = surname
        self.position = position
        # protected pay dict {"wage": wage, "bonus": bonus}; original note:
        # could not pass the dict straight into the salary-calculation
        # method, only through the getter (Position.get_total_income)
        self._income = income
class Position(Worker):
    """Concrete job position: adds name and income reporting to Worker."""

    def get_full_name(self):
        """Return a human-readable description of the worker's full name."""
        return f'сотрудник по имени {self.name} и по фамилии {self.surname}'

    def get_total_income(self):
        """Return the protected pay dict ({"wage": ..., "bonus": ...})."""
        return self._income

    def calc_salary(self, income):
        """Print the worker's total pay (sum of all components of *income*)."""
        total = sum(income.values())
        print(f'зп сотрудника {self.surname} {self.name} с учетом премии == {total}')
# Smoke test: build two Position instances and exercise every accessor.
position_worker1 = Position('Иван', 'Иванов', 'дворник', {"wage": 5000, "bonus": 1000})
position_worker2 = Position('Петр', 'Петров', 'бух', {"wage": 10000, "bonus": 3000})
print(position_worker1.get_full_name())
print(position_worker1.get_total_income())
position_worker1.calc_salary(position_worker1.get_total_income())
position_worker2.calc_salary(position_worker2.get_total_income())
|
987,890 | 2ddba794c5ab16dd5f491973260dfcb439446559 | # -*- coding: utf-8 -*-\
import os
import sys
import json
import codecs
from datetime import datetime, date
import time
import requests
import random
import threading
from flask import Flask, request, render_template
from flask_sqlalchemy import SQLAlchemy
import tweepy
import discover
import worldProcessing
import discoverMemory
from apscheduler.schedulers.background import BackgroundScheduler
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ["DATABASE_URL"]
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
MYDIR = os.path.dirname(__file__)
def get_self():
    """Return the module-level Flask application instance."""
    return app
def set_database():
    """Initialise the module-level ``db`` handle.

    NOTE(review): ``discoverMemory`` is imported as a module yet called here
    like a class/factory -- confirm it is actually callable.
    """
    global db
    db = discoverMemory(SQLAlchemy(app))
def get_database():
    """Return the shared database handle, creating it on first use.

    The original read the module global ``db`` before it was ever assigned,
    raising NameError whenever set_database() had not run first; the
    globals() lookup below treats that case the same as ``db is None``.
    """
    global db
    if globals().get('db') is not None:
        return db
    db = SQLAlchemy(app)
    return db
# configurating Discover
global timestart, routine
routine = []
activity_start = ['09', '10', '11', '12']
activity_end = ['00', '01', '02', '03', '04', '05']
@app.before_first_request
def automatic():
    """One-time bootstrap executed before the first request.

    Initialises the Discover world, builds the routine and schedules
    discover_iterate() every 15 minutes on a background scheduler.
    """
    global routinedisc
    worldProcessing.init_discover()
    print("Discover Chan setted-up!")
    # NOTE(review): init_discover() is called twice in a row -- looks
    # redundant; confirm whether the second call is intentional.
    worldProcessing.init_discover()
    routinedisc = worldProcessing.init_routine()
    scheduler = BackgroundScheduler()
    scheduler.add_job(func=discover_iterate, trigger="interval",minutes=15)
    scheduler.start()
    # send_message(u'1579846222104780', worldProcessing.return_thoughts())
    time.sleep(10)
    return "Huh", 200
def discover_iterate():
    """Scheduler tick: advance the routine one step and report its story."""
    routinedisc.time_process()
    story = routinedisc.resolve_story()
    # hard-coded Facebook recipient id of the owner conversation
    send_message(u'1579846222104780', str(story))
@app.route('/', methods=['GET'])
def verify():
    """Webhook verification handshake plus a small status page.

    Echoes hub.challenge for Facebook's subscription handshake; on a plain
    GET renders the character-status page instead.
    """
    # when the endpoint is registered as a webhook, it must echo back
    # the 'hub.challenge' value it receives in the query arguments
    if request.args.get("hub.mode") == "subscribe" and request.args.get("hub.challenge"):
        if not request.args.get("hub.verify_token") == os.environ["VERIFY_TOKEN"]:
            return "Verification token mismatch", 403
        return request.args["hub.challenge"], 200
    if routinedisc and routinedisc.char:
        return render_template("home.html", location=routinedisc.char.location.names["where"],hunger=str(routinedisc.char.hunger), mood = str(routinedisc.char.mood))
    else:
        # NOTE(review): body says "404" but the HTTP status is 200 -- confirm.
        return "404",200
x = 1
@app.route('/Status', methods=['GET'])
def report_status():
    """Expose the character's current internal monologue as plain text."""
    return worldProcessing.return_thoughts(), 200
@app.route('/', methods=['POST'])
def webhook():
    """Facebook Messenger webhook: dispatch incoming messaging events.

    Text messages are handed to worldProcessing.process(); delivery,
    opt-in and postback events are acknowledged but ignored.
    """
    # endpoint for processing incoming messaging events
    data = request.get_json()
    log(data)  # you may not want to log every incoming message in production, but it's good for testing
    if data["object"] == "page":
        for entry in data["entry"]:
            for messaging_event in entry["messaging"]:
                if messaging_event.get("message"):  # someone sent us a message
                    my_id = "0"
                    sender_id = messaging_event["sender"]["id"]  # the facebook ID of the person sending you the message
                    recipient_id = messaging_event["recipient"][
                        "id"]  # the recipient's ID, which should be your page's facebook ID
                    if messaging_event.get("message").get("text"):
                        message_text = messaging_event["message"]["text"]  # the message's text
                        worldProcessing.process(messaging_event)
                if messaging_event.get("delivery"):  # delivery confirmation
                    pass
                if messaging_event.get("optin"):  # optin confirmation
                    pass
                if messaging_event.get("postback"):  # user clicked/tapped "postback" button in earlier message
                    pass
    return 'ok', 200
def send_message(recipient_id, message_text):
    """POST *message_text* to *recipient_id* through the Facebook Graph API."""
    log("sending message to {recipient}: {text}".format(recipient=recipient_id, text=message_text))
    payload = {
        "recipient": {"id": recipient_id},
        "message": {"text": message_text},
    }
    response = requests.post(
        "https://graph.facebook.com/v2.6/me/messages",
        params={"access_token": os.environ["PAGE_ACCESS_TOKEN"]},
        headers={"Content-Type": "application/json"},
        data=json.dumps(payload),
    )
    # anything other than 200 is logged for post-mortem debugging
    if response.status_code != 200:
        log(response.status_code)
        log(response.text)
def log(msg, *args, **kwargs):  # simple wrapper for logging to stdout on heroku
    """Format *msg* with args, print it with a timestamp, and never raise.

    Accepts dicts (JSON-encoded), bytes (UTF-8 decoded) and anything else
    via str(). The original was Python-2 only: it used ``unicode`` and a
    ``print u"..."`` statement (a SyntaxError on Python 3) and called
    ``.decode`` on str, which raised an uncaught AttributeError.

    Caveat: like the original, ``.format`` is always applied, so messages
    containing literal braces (e.g. JSON-encoded dicts) may still raise
    inside the try and be logged as nothing at all.
    """
    try:
        if type(msg) is dict:
            msg = json.dumps(msg)
        elif isinstance(msg, bytes):
            msg = msg.decode('utf-8')
        else:
            msg = str(msg)
        msg = msg.format(*args, **kwargs)
        print("{}: {}".format(datetime.now(), msg))
    except (UnicodeEncodeError, IndexError, KeyError):
        pass  # squash logging errors in case of non-ascii or brace-laden text
    sys.stdout.flush()
# checking out status of the server if it's already online.
def check_status():
    """Probe the deployed Heroku app in a background thread until it answers 200."""
    def start_loop():
        # retry loop: HTTP-probe the app every 2 seconds until it responds
        not_started = True
        while not_started:
            print('In start loop')
            try:
                r = requests.get('https://discoverchan.herokuapp.com')
                if r.status_code == 200:
                    print('Server started, quiting start_loop')
                    not_started = False
                print(r.status_code)
            except:
                print('Server not yet started')
            time.sleep(2)
    print('Started runner')
    # daemonless worker thread; it exits once the probe succeeds
    thread = threading.Thread(target=start_loop)
    thread.start()
def init():
    """Authenticate with the Twitter API using environment-variable credentials.

    Sets the module-level ``twitter`` client used by tweet(). Raises
    KeyError if any of the four *_TWITTER environment variables is unset.
    """
    global twitter
    consumer_key = os.environ["CONSUMER_KEY_TWITTER"]
    consumer_secret = os.environ["CONSUMER_SECRET_TWITTER"]
    access_key = os.environ["ACCESS_KEY_TWITTER"]
    access_secret = os.environ["ACCESS_SECRET_TWITTER"]
    # the consumer key is deliberately no longer printed: API credentials
    # must not end up in (Heroku) logs
    print('succes')
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    twitter = tweepy.API(auth)
def tweet(twit):
    """Post *twit* if it fits a classic 140-character tweet; report success."""
    if 0 < len(twit) <= 140:
        twitter.update_status(twit)  # post the tweet (update the status)
        return True
    return False
# NOTE(review): init() runs at import time and requires the four *_TWITTER
# environment variables to be set -- importing this module raises KeyError
# otherwise.
init()
if __name__ == '__main__':
    print("Server response")
    # checking status of server before running first functions
    check_status()
    worldProcessing.init_discover()
    # initiliazing twitter
    # remembering the time we started
    timestart = datetime.now()
    print(str(timestart))
    # running the server
    app.run(debug=True, use_reloader=False)
    # NOTE(review): app.run() blocks; the next line only executes after the
    # server stops -- confirm discoverMemory.init() is meant to run then.
    discoverMemory.init(app)
|
def solution(n):
    """Fill a size-*n* triangle in 'snail' order (down, right, then
    diagonally up-left) and return the cell values row by row."""
    # triangle rows bordered with a -1 sentinel so the walker can detect
    # the edge without explicit bounds checks
    tri = [[0] * (row + 1) for row in range(n)]
    tri[n - 1].append(-1)
    tri.append([-1] * n)  # sentinel row below the triangle
    moves = [(1, 0), (0, 1), (-1, -1)]  # down, right, diagonal up-left
    direction = 0
    total = n * (n + 1) // 2
    r = c = 0
    for value in range(1, total + 1):
        tri[r][c] = value
        dr, dc = moves[direction]
        # turn when the next cell is already filled or is a sentinel
        if tri[r + dr][c + dc] != 0:
            direction = (direction + 1) % 3
            dr, dc = moves[direction]
        r += dr
        c += dc
    flat = [cell for row in tri[:n] for cell in row]
    flat.pop()  # drop the sentinel appended to the last triangle row
    return flat
|
987,892 | f0358e3df11b177676bd3526bbdd251dde6a9f03 | """ this file is used to perform helper methods."""
import os
import sys
from prettytable import PrettyTable
from common.constants import Color, Base, BackButton
from pynput.keyboard import Key, Listener
def always_true():
    """Always return True.

    Loop-condition hook: production loops call this so unit tests can
    monkey-patch it to return False and break otherwise-infinite loops.
    """
    return True
def raw_data_to_table(raw_data, cursor):
    """
    Render database rows as an ASCII table on stdout.
    :param raw_data: list/tuple, raw rows data.
    :param cursor: cursor, its .description tuples supply the column headers.
    """
    table = PrettyTable()
    # column headers come from the first element of each description tuple
    table.field_names = [desc[0] for desc in cursor.description]
    for record in raw_data:
        table.add_row(record)
    print(table)
class Menu:
    """
    Interactive console menu driven by the keyboard arrow keys.

    Attributes:
        index (int): index of the currently highlighted item.
        count (int): index of the last item in the list.
        flag (int): becomes non-zero once the user confirms with Enter.
    """

    def __init__(self):
        """Initialise cursor position, item count and selection flag."""
        self.index = 0
        self.count = 0
        self.flag = 0

    def on_press(self, key):
        """
        Keyboard callback: move the cursor on Up/Down, confirm on Enter.

        Up/Down wrap around the ends of the list. Returning False stops the
        pynput listener; returning None keeps it running.

        :param key: Key, keyboard pressed key.
        :return: bool or None, False to stop the listener.
        """
        # Fix over the original: the bare ``except:`` swallowed *every*
        # error; only AttributeError (character keys have no .name) is an
        # expected condition here.
        try:
            if 'up' == key.name:
                if self.index > 0:
                    self.index -= 1
                else:
                    self.index = self.count
            elif 'down' == key.name:
                if self.index < self.count:
                    self.index += 1
                else:
                    self.index = 0
            elif key == Key.enter:
                # Stop listener
                self.flag += 1
                return False
        except AttributeError:
            # character key (KeyCode) without .name: stop the listener so
            # draw_menu() simply redraws the menu
            return False

    def draw_menu(self, items):
        """
        Print *items* and block until the user selects one with Enter.

        The highlighted entry is rendered inverted and the last entry (the
        exit option) in red. Choosing the EXIT entry terminates the process.

        :param items: list, items to print on menu.
        :return: string, selected item name from menu.
        """
        self.count = len(items) - 1
        while True:
            if self.flag >= 1:
                if items[self.index] == str(BackButton.EXIT.name):
                    print("exiting from system")
                    sys.exit()
                else:
                    return items[self.index]
            for x in range(0, self.count+1):
                if x == self.index:
                    print(Color.B_LightGray + Color.F_Black + items[x] + Base.END)
                elif x == self.count:
                    print(Color.F_Red + items[x] + Base.END)
                else:
                    print(items[x])
            # block until a key press changes the cursor or confirms
            with Listener(
                    on_press=self.on_press
            ) as listener:
                listener.join()
            os.system('clear')
            # providing if statement in last to emulate do while loop
            if not always_true():
                break
|
987,893 | 0300c4805554c672261689c89cfbab8422753313 | import sys
import os
sys.path.append(os.environ['MLIR_LIBRARY_PATH'])
import mlir
# Test Module constructor with mlir file.
def test_constructor():
    """Build an mlir.Module from ./test_input.mlir and return it.

    Requires MLIR_LIBRARY_PATH on sys.path (done at module import) and the
    test_input.mlir fixture in the current working directory.
    """
    mlir.registerAllDialects()
    ctx = mlir.Context()
    sourcemgr = mlir.SourceMgr()
    module = mlir.Module("./test_input.mlir", ctx, sourcemgr)
    return module
|
987,894 | fb189a4493ee81fd2598f9d1a5d6773e5dd07c32 | import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import warnings
def creat_cross_sample():
    """Cross-match group galaxies with GZ2 and bulge-disc decompositions.

    Joins the group catalogue with Galaxy Zoo 2 classifications plus the
    n=4 and free-n bulge-disc fits, then writes cross_gz2_group_bdd.csv.

    NOTE(review): DataFrame.ix was removed in pandas 1.0 -- this function
    only runs on old pandas; migrate to .loc/.iloc when touching it.
    """
    group_galaxies = pd.read_csv('group_galaxies.csv', engine='c')
    group_galaxy_type = pd.read_table('group_galaxy_type.dat', engine='c', header=None)
    # map numeric spectral-type codes to their labels (galaxy_types dict)
    group_galaxies['typ'] = [galaxy_types[i[0]] for i in group_galaxy_type.values]
    # keep only galaxies that have a GZ2 counterpart
    cross = group_galaxies[~np.isnan(group_galaxies.gz2id)]
    gz2 = pd.read_csv('GZ2.csv', engine='c')
    cross['dr7id'] = gz2.ix[cross.gz2id, 'dr7objid'].values
    cross['bar_debiased_fraction'] = gz2.ix[cross.gz2id, 't03_bar_a06_bar_debiased'].values
    # n=4 bulge decomposition parameters
    n4_bdd = pd.read_csv('n4_bdd.csv', engine='c', index_col='col0')
    cross['B_T_n4'] = n4_bdd.ix[cross.dr7id, 'col14'].values
    cross['Re_n4'] = n4_bdd.ix[cross.dr7id, 'col22'].values
    cross['be_n4'] = n4_bdd.ix[cross.dr7id, 'col24'].values
    cross['Rd_n4'] = n4_bdd.ix[cross.dr7id, 'col28'].values
    cross['dia_n4'] = n4_bdd.ix[cross.dr7id, 'col30'].values
    # free-n bulge decomposition parameters
    fn_bdd = pd.read_csv('fn_bdd.csv', engine='c', index_col='col0')
    cross['B_T_fn'] = fn_bdd.ix[cross.dr7id, 'col14'].values
    cross['Re_fn'] = fn_bdd.ix[cross.dr7id, 'col22'].values
    cross['be_fn'] = fn_bdd.ix[cross.dr7id, 'col24'].values
    cross['Rd_fn'] = fn_bdd.ix[cross.dr7id, 'col28'].values
    cross['dia_fn'] = fn_bdd.ix[cross.dr7id, 'col30'].values
    cross_gz2_group = cross[['galaxyid', 'gz2id', 'z', 'petro_abs_mag', 'gz2class', 'bar_debiased_fraction', 'typ',
                             'B_T_n4', 'Re_n4', 'be_n4', 'Rd_n4', 'dia_n4',
                             'B_T_fn', 'Re_fn', 'be_fn', 'Rd_fn', 'dia_fn']]
    cross_gz2_group.to_csv('cross_gz2_group_bdd.csv', index=None)
def control_sample():
    """Build a control sample of unbarred galaxies matched to each barred one.

    For every barred galaxy in the volume-limited sample, up to three
    controls with similar redshift, absolute magnitude and bulge-to-total
    ratio are selected. Fixes over the original: label-based ``.loc``
    instead of the removed ``.ix``, explicit ``axis=1`` in drop(),
    ``pd.concat`` instead of the removed ``DataFrame.append``, and the
    ``gz2clss`` / ``str.contain`` typos in the final report line (which
    previously raised AttributeError).
    """
    cross = pd.read_csv('cross_gz2_group_bdd.csv', engine='c')
    # volume-limited sample with a valid bulge-to-total decomposition
    vls = cross[(~np.isnan(cross.B_T_n4)) & (cross.B_T_n4 > -1) &
                (cross.z < 0.06) & (cross.petro_abs_mag < -19.38)]
    vls['kind'] = 'unbar'
    bar_sample = vls[(vls.bar_debiased_fraction > 0.5) & (vls.gz2class.str.contains('B'))]
    bar_sample['kind'] = 'bar'
    ctrl = pd.DataFrame()
    for i in bar_sample.index[:]:
        bs = bar_sample.loc[i]
        # candidate controls: close in z, Mr and B/T, excluding the galaxy itself
        cubs = vls[(abs(vls.z - bs.z) < 0.01) &
                   (abs(vls.petro_abs_mag - bs.petro_abs_mag) < 0.1) &
                   (abs(vls.B_T_n4 - bs.B_T_n4) < 0.01) &
                   (vls.gz2id != bs.gz2id)]
        if len(cubs) >= 3:
            cubs['diff_B_T_n4'] = abs(cubs.B_T_n4 - bs.B_T_n4)
            cubs['diff_Mr'] = abs(cubs.petro_abs_mag - bs.petro_abs_mag)
            cubs.sort_values(inplace=True, by=['diff_B_T_n4', 'diff_Mr'])
            cubs.drop(['diff_B_T_n4', 'diff_Mr'], axis=1, inplace=True)
            # the barred galaxy followed by its three best-matched controls
            ctrl = pd.concat([ctrl, bs.to_frame().T, cubs[:3]])
    print(len(ctrl[(ctrl.kind == 'unbar') & (ctrl.gz2class.str.contains('B'))]), len(ctrl))
def verify():
    """Load GZ2 and the n4 decomposition for a cross-match sanity check.

    The astropy coordinate-matching code is kept commented out for
    reference; currently this only reads the two catalogues.
    """
    gz2 = pd.read_csv('GZ2.csv', engine='c')
    n4_bdd = pd.read_csv('n4_bdd.csv', engine='c')
    # meta = pd.read_csv('gz2sample.csv', engine='c')
    # from astropy.coordinates import SkyCoord
    # from astropy import units as u
    # idx,sep2d,d3d = SkyCoord(ra=gz2.ra2, dec=gz2.dec2, unit=u.deg, frame='icrs').match_to_catalog_sky(SkyCoord(ra=n4_bdd.ra,dec=bar.dec,unit=u.deg,frame='icrs'),nthneighbor=1)
def plot_distribution():
    """Plot the B/T distribution of unbarred galaxies in three Mr bins."""
    cross = pd.read_csv('cross_gz2_group_bdd.csv', engine='c')
    # volume-limited sample with a valid decomposition
    vls = cross[(cross.z < 0.06) & (cross.petro_abs_mag < -19.38) & (~np.isnan(cross.B_T_n4)) & (cross.B_T_n4 > -1)]
    # B_T distribution, unbarred ('B' absent from the GZ2 class string)
    vls = vls[~vls.gz2class.str.contains('B')]
    sns.distplot(vls[vls.petro_abs_mag > -20].B_T_n4, bins=np.linspace(0, 1, 11), rug=False, kde=False, norm_hist=False,
                 hist=True, hist_kws={'histtype': 'step', 'linewidth': 3}, color='blue').set(xlim=(-0.04, 1.04))
    sns.distplot(vls[(vls.petro_abs_mag > -21) & (vls.petro_abs_mag < -20)].B_T_n4, bins=np.linspace(0, 1, 11), rug=False, kde=False, norm_hist=False,
                 hist=True, hist_kws={'histtype': 'step', 'linewidth': 3}, color='red').set(xlim=(-0.04, 1.04))
    sns.distplot(vls[vls.petro_abs_mag < -21].B_T_n4, bins=np.linspace(0, 1, 11), rug=False, kde=False, norm_hist=False,
                 hist=True, hist_kws={'histtype': 'step', 'linewidth': 3}, color='green').set(xlim=(-0.04, 1.04))
    plt.legend(('-20 < Mr < -19.38', '-21 < Mr < -20', 'Mr < -21'))
if __name__ == '__main__':
    warnings.filterwarnings('ignore')
    pd.set_option('display.width', 200)
    # flat-UI colour palette used by the (commented) scatter plots
    flatui = ["#95a5a6", "#3498db", "#9b59b6", "#e74c3c", "#34495e", "#2ecc71", "#1de9b6", "#827717"]
    # numeric spectral-type code -> label mapping for the group catalogue
    galaxy_types = {
        -1: 'Unclassifiable',
        0: 'Not used',
        1: 'SF',
        2: 'low S/N SF',
        3: 'Composite',
        4: 'AGN non-Liner',
        5: 'Low S/N Liner'
    }
    # creat_cross_sample()
    control_sample()
    # plot_distribution()
    # NOTE(review): sns.plt was removed in seaborn 0.8+; use plt.show().
    sns.plt.show()
|
987,895 | 070f0f40107059c10058c5f4cd32ff3c1a80dca7 | # Compare similarity between two images by using VGG16 as a feature extractor and cosine similarity as a distance metric
# Author: Pappu Kumar Yadav
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import Model
from pickle import dump
import pickle
import matplotlib.pyplot as plt
from scipy import stats
from scipy import spatial
from scipy.stats import f_oneway
# load an image from file (VGG16 expects 224x224 RGB input)
image1 = load_img('./images/four.png', target_size=(224, 224))
#image1 = load_img('./images/three.png', target_size=(224, 224))
image2 = load_img('./images/two_1.png', target_size=(224, 224))
# convert the image pixels to a numpy array
image1 = img_to_array(image1)
image2 = img_to_array(image2)
# reshape data for the VGG16 model (add the batch dimension)
image1 = image1.reshape((1, image1.shape[0], image1.shape[1], image1.shape[2]))
image2 = image2.reshape((1, image2.shape[0], image2.shape[1], image2.shape[2]))
# prepare the image for the VGG16 model
image1 = preprocess_input(image1)
image2 = preprocess_input(image2)
# load model
model = VGG16()
# remove the output layer so as to use VGG16 as feature extractor and not as a classifier
model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
# get extracted features (penultimate fc layer: one 4096-dim vector each)
features1 = model.predict(image1)
features2 = model.predict(image2)
print(features1.shape)
print(features2.shape)
def image_similarity(vector1, vector2):
    """Cosine similarity between two feature vectors (1.0 = same direction)."""
    # scipy returns the cosine *distance*; similarity is its complement
    return 1 - spatial.distance.cosine(vector1, vector2)
# visual report: the two feature vectors and their pairwise scatter
fig, plot_num = plt.subplots(3)
plot_num[0].plot(features1[0])
plot_num[1].plot(features2[0])
plot_num[2].scatter(features1[0],features2[0])
aa=image_similarity(features1,features2)
print(f"similarity={aa}")
print(aa)
# Pearson correlation as a second similarity measure
bb=stats.pearsonr(features1[0],features2[0])
print(bb)
# save to file
dump(features1, open('features1.pkl', 'wb'))
dump(features2, open('features2.pkl', 'wb'))
# NOTE(review): these hard-coded /content/ paths assume Google Colab and
# differ from the relative paths used just above -- confirm.
with open('/content/features1.pkl', 'rb') as f:
    data1 = pickle.load(f)
with open('/content/features2.pkl', 'rb') as f:
    data2 = pickle.load(f)
# raw array buffers are written as binary despite the .txt extension
with open("features1.txt", "wb") as abc:
    abc.write(data1)
with open("features2.txt", "wb") as abc:
    abc.write(data2)
987,896 | d8b4fde08bfa3b603cc3e288c2e7c962661a5696 |
from application import app
from application import lm
from flask import render_template
from lib.factory.StorageLocation import StorageLocation as DocFactory
from lib.config.Yaml import Yaml as Config
from lib.job.storage.MongoDB import MongoDB as JobStorage
from pymongo import MongoClient
import re
from werkzeug.security import generate_password_hash
from lib.location.Wiki import Wiki
from lib.location.GMap import GMap
from lib.logger.MongoDB import MongoDB as MongoDBLog
import hashlib
from bson.objectid import ObjectId
from flask import request
from flask import redirect
from flask import url_for
from lib.factory.Loader import Loader as LoaderFactory
from lib.parser.map.google.GMapFactory import GMapFactory as MapFactory
from lib.spider.Spider import Spider
from application.forms import LoginForm
from application.user import User
from flask.ext.login import login_user, logout_user, login_required
from flask import request, redirect, render_template, url_for, flash, session, abort
from lib.keygen.gmap_keygen import Keygen
from lib.factory.Loader import Loader
import json
import requests
import random
import os
import urllib.parse
def escape(val):
    """Coerce *val* to str and backslash-escape every regex metacharacter."""
    as_text = str(val)
    return re.escape(as_text)
@app.route("/change_limit", methods=['GET', 'POST'] )
@login_required
def Limites():
    """Reset the per-key usage counters (geocode/place/distance) to zero.

    Expects the API key in the 'key' form field and echoes it back.
    """
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.local
    # NOTE(review): the trailing positional arguments (False, True) map onto
    # upsert and bypass_document_validation in pymongo's update_one -- this
    # looks like a leftover from the legacy update(multi=True) API; confirm.
    db.keygen.update_one({'key':request.form['key'] }, {'$set': {'geocode': 0, "place":0, "distance":0}}, False, True)
    return request.form['key']
@app.route("/")
@login_required
def index():
    """Render the admin dashboard landing page."""
    return render_template('admin/index.html')
def generate_code():
    """Return a random 4-digit verification code as a string.

    Uses the ``secrets`` module rather than ``random`` because the code
    protects login confirmation (``random`` is not cryptographically
    secure). Also includes 9999, which the original randrange(1000, 9999)
    silently excluded via its exclusive upper bound.
    """
    import secrets
    return str(secrets.randbelow(9000) + 1000)
def send_confirmation_code(to_number):
    """Generate a login code, stash it in the session and SMS it to *to_number*."""
    verification_code = generate_code()
    # stored in the session so confirm() can compare the user's answer
    session['verification_code'] = verification_code
    headers = {'Content-type': 'application/json',
               'Accept': 'text/plain',
               'Content-Encoding': 'utf-8'}
    data = {"to":str(to_number) , "message":"Код підтвердження авторизації Sinoptik Parser: "+str(verification_code)+""}
    # NOTE(review): the gateway response is never checked, so SMS delivery
    # failures go unnoticed.
    response = requests.post('http://sms-gate.ukr.net/sms/send', data=json.dumps(data), headers=headers)
    # verification_code = generate_code()
    # send_sms(to_number, verification_code)
    # session['verification_code'] = verification_code
@app.route("/confirm", methods=['GET', 'POST'] )
def confirm():
    """Second authentication step: verify the SMS code sent at login.

    NOTE(review): any non-empty DEV_ENV value bypasses code verification --
    make sure this variable can never be set in production.
    """
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.local
    # 401 unless login() already stored the phone number in the session
    user = db.users.find_one({"_id": session.get('userphone', '')}) or abort(401)
    print (session.get('userphone', ''))
    if request.method == 'POST':
        is_valid_code = request.form['verification_code'] == session['verification_code']
        is_dev_env = os.environ.get('DEV_ENV')
        if is_valid_code or is_dev_env:
            user_obj = User(user['_id'])
            login_user(user_obj)
            return redirect(url_for('index'))
    return render_template('admin/login/confirm.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
    """First authentication step: username/password, then SMS confirmation.

    NOTE(review): the "Logged in successfully" flash can only execute when
    credentials are *wrong* (the valid path returns earlier), and the error
    flash also fires on a plain GET -- the flash ordering looks buggy;
    confirm intent before relying on these messages.
    """
    form = LoginForm()
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.local
    if request.method == 'POST' and form.validate_on_submit():
        user = db.users.find_one({"_id": form.username.data})
        if user and User.validate_login(user['password'], form.password.data):
            # user_obj = User(user['_id'])
            session['userphone'] = user['_id']
            send_confirmation_code(user['_id'])
            return redirect(url_for('confirm'))
        # login_user(user_obj)
        flash("Logged in successfully!", category='success')
        # return redirect(url_for('index'))
    flash("Wrong username or password!", category='error')
    return render_template('admin/login/sign-in.html', form=form)
@lm.user_loader
def load_user(username):
    """Flask-Login user loader: fetch the user document by its _id (phone)."""
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.local
    u = db.users.find_one({"_id": username})
    if not u:
        return None
    return User(u['_id'])
@app.route('/logout')
def logout():
    """Log the user out, clear the session and return to the login page."""
    logout_user()
    session.clear()
    return redirect(url_for('login'))
@app.route('/internal')
@app.route('/internal/<string:country>')
@login_required
def internal_list(country=None):
    """Render the internal-locations list, optionally filtered by country."""
    return render_template('admin/internal/list.html', country=country)
@app.route('/internal/unit/<string:id>')
@login_required
def internal_unit(id):
    """Render the read-only detail page for one internal location document."""
    config = Config('./config/config.yml')
    api_key = config.get('googlemaps').get('geocoding')
    factory = DocFactory(config.get('mongodb'))
    collection = factory.internal_collection()
    obj = collection.find_one({'_id': ObjectId(id)})
    return render_template('admin/internal/unit.html', data=obj, api_key=api_key)
@app.route('/internal/edit')
@app.route('/internal/edit/<string:id>')
@app.route('/internal/edit/<string:id>/<string:saved>')
@login_required
def internal_edit(id=None, saved=0):
    """Render the edit form for an internal location document.

    :param id: optional Mongo ObjectId string of the document to edit;
        omitted for the "create new" form.
    :param saved: truthy flag used by the template to show a "saved" banner.
    """
    config = Config('./config/config.yml')
    api_key = config.get('googlemaps').get('geocoding')
    obj = {}
    if id:
        factory = DocFactory(config.get('mongodb'))
        collection = factory.internal_collection()
        obj = collection.find_one({'_id': ObjectId(id)})
    admin_levels = ['ADMIN_LEVEL_1', 'ADMIN_LEVEL_2', 'ADMIN_LEVEL_3', 'ADMIN_LEVEL_4', 'ADMIN_LEVEL_5', 'ADMIN_LEVEL_6', 'ADMIN_LEVEL_7', 'ADMIN_LEVEL_8']
    languages = ['en', 'it', 'fr']
    if obj:
        # the levels above the object's own type are selectable parents
        levels = []
        for level in admin_levels:
            if obj.get('type') == level:
                break
            levels.append(level)
        # materialise postal codes as a list of strings: the original
        # assigned a one-shot generator, which any template iterating it
        # more than once would silently see as empty
        old_postal = obj.get('postal_codes', [])
        obj.update(postal_codes=[str(x) for x in old_postal])
    else:
        obj = {}
        levels = admin_levels
    return render_template('admin/internal/edit.html',
                           admin_levels=admin_levels,
                           levels=levels,
                           data=obj,
                           api_key=api_key,
                           languages=languages,
                           saved=saved
                           )
@app.route('/internal/save', methods=['POST'])
@login_required
def internal_save():
    """Create or update an internal location document from the edit form."""
    post = request.form.copy()
    config = Config('./config/config.yml')
    factory = DocFactory(config.get('mongodb'))
    collection = factory.internal_collection()
    obj = {}
    data = internal_form_mapping(post)
    if post.get('id'):
        obj = collection.find_one({'_id': ObjectId(post.get('id'))})
    if obj:
        # existing document: overwrite its fields in place
        collection.update_one({'_id': ObjectId(post.get('id'))}, {'$set': data})
    else:
        # new document: remember the generated id for the redirect
        result = collection.insert_one(data)
        obj.update(_id=result.inserted_id)
    return redirect(url_for('internal_edit', id=obj.get('_id'), saved=1))
@login_required
def internal_form_mapping(data):
    """Map the flat edit-form fields onto the nested Mongo document shape.

    NOTE(review): @login_required on a non-route helper is unusual -- it
    only works because the sole visible caller (internal_save) runs inside
    an authenticated request; confirm the decorator belongs here.

    :param data: dict/MultiDict of posted form fields.
    :return: dict shaped like an internal-locations collection document.
    """
    obj = {
        'name': data.get('name', ''),
        'capital': data.get('capital', ''),
        'type': data.get('type', ''),
        'admin_hierarchy': {
            'ADMIN_LEVEL_1': data.get('ADMIN_LEVEL_1', ''),
            'ADMIN_LEVEL_2': data.get('ADMIN_LEVEL_2', ''),
            'ADMIN_LEVEL_3': data.get('ADMIN_LEVEL_3', ''),
            'ADMIN_LEVEL_4': data.get('ADMIN_LEVEL_4', ''),
            'ADMIN_LEVEL_5': data.get('ADMIN_LEVEL_5', ''),
            'ADMIN_LEVEL_6': data.get('ADMIN_LEVEL_6', ''),
            'ADMIN_LEVEL_7': data.get('ADMIN_LEVEL_7', ''),
            'ADMIN_LEVEL_8': data.get('ADMIN_LEVEL_8', ''),
        },
        'altitude': data.get('altitude', ''),
        'population': data.get('population', ''),
        'area': data.get('area', ''),
        'density': data.get('density', ''),
        # the form submits a single comma-separated string
        'postal_codes': data.get('postal_codes', '').split(','),
        'time': data.get('time', ''),
        'center': {
            'lat': data.get('latitude', ''),
            'lng': data.get('longitude', ''),
        },
        'bounds': {
            'left': {
                'lat': data.get('left_latitude', ''),
                'lng': data.get('left_longitude', '')
            },
            'right': {
                'lat': data.get('right_latitude', ''),
                'lng': data.get('right_longitude', '')
            }
        },
        'i18n': {
            'en': data.get('en', ''),
            'fr': data.get('fr', ''),
            'it': data.get('it', '')
        }
    }
    return obj
@app.route('/internal/delete/<string:id>')
@login_required
def internal_delete(id):
    """Delete the internal location with the given ObjectId and auto-close."""
    config = Config('./config/config.yml')
    factory = DocFactory(config.get('mongodb'))
    collection = factory.internal_collection()
    # _id is unique, so delete_many removes at most one document here
    result = collection.delete_many({'_id': ObjectId(id)})
    return render_template('admin/empty.html', data='ok', auto_close=True)
@app.route('/data/<string:provider_type>.js')
@app.route('/data/<string:provider_type>/<string:country>.js')
@login_required
def data_provider(provider_type, country=None):
    """Render a JS listing of documents for one provider (wiki/gmap/internal).

    With a country, results are narrowed to that ADMIN_LEVEL_1 name;
    without one, only documents that have a non-empty name are listed.
    """
    factory = DocFactory(Config('./config/config.yml').get('mongodb'))
    has_name = {'name': {'$exists': True, '$not': {'$size': 0}}}
    country_clause = {'admin_hierarchy.ADMIN_LEVEL_1.name': country}
    if provider_type == Wiki.TYPE:
        data = factory.wiki_collection()
        query = dict(has_name, **country_clause) if country else {}
    elif provider_type == GMap.TYPE:
        data = factory.gmaps_collection()
        query = dict(has_name, **country_clause) if country else {}
    else:
        # Internal documents are filtered by country only (no name requirement).
        data = factory.internal_collection()
        query = dict(country_clause) if country else {}
    if not query:
        query = has_name
    objects = data.find(query)
    return render_template('admin/{}/list.js'.format(provider_type), e=escape, items=objects)
##############################################
# UKRAINE
##############################################
@app.route('/sublocal/ukraine')
@login_required
def ukraine_city(region=None, provincia=None):
    """List Ukrainian cities for the sub-locality admin screen."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    cities = location.ukraine_city.find()
    return render_template('admin/ukraine/region-list.html', com=0, data=cities)
@app.route('/ukraine/<int:city_id>/<string:city_type>')
@login_required
def ukraine_city_sub(city_id=None, city_type=None):
    """Show the sub-locality translation table for one Ukrainian city."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    rows = location.ukraine_city_sublocal.find({"city_id": city_id, "type": city_type})
    city = location.ukraine_city.find_one({"city_id": city_id})
    # One language list serves all three template roles (the original kept
    # three identical copies).
    langs = ["uk", "ru", "ca", "lv", "en", "pl", "de", "fr", "it", "es", "ro",
             "nl", "el", "cs", "pt", "hu", "sv", "bg", "sr", "da", "fi", "sk",
             "sl", "hr", "lt"]
    land_defoult = ["uk", "ru", "en", "pl", "de", "fr", "it", "es", "ro"]
    return render_template('admin/ukraine/list.html',
                           start_index=8,
                           languages_td=langs,
                           languages=enumerate(langs),
                           languages_tr=enumerate(langs),
                           land_defoult=land_defoult,
                           city_name=city["title"],
                           com=0,
                           data=rows)
@app.context_processor
def utility_processor():
    """Expose `counter` to templates: number of sublocal rows for a city/type pair."""
    def counter(type_city, city_id):
        mongo_cfg = Config('./config/config.yml').get('mongodb')
        location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
        return location.ukraine_city_sublocal.find({"city_id": city_id, "type": type_city}).count()
    return dict(counter=counter)
# sinoptik_db_ro
# @app.context_processor
# def utility_processor():
# def counter(type_city, city_id):
# config = Config('./config/config.yml')
# mongo_config = config.get('mongodb')
# connection = MongoClient(mongo_config['host'], mongo_config['port'])
# db = connection.location
# cnt = db.ukraine_city_sublocal.find({"city_id": city_id, "type":type_city}).count()
# return cnt
# return dict(counter=counter)
@app.route('/urk_delete', methods=['GET', 'POST'])
@login_required
def urk_delete():
    """Delete one Ukrainian sublocal document by its ObjectId and echo the id.

    Uses delete_one() instead of the deprecated Collection.remove()
    (removed in PyMongo 4); the _id filter matches at most one document,
    so behaviour is unchanged.
    """
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.location
    db.ukraine_city_sublocal.delete_one({"_id": ObjectId(request.form['id'])})
    return request.form['id']
@app.route('/urk_sub_confirm', methods=['GET', 'POST'])
@login_required
def urk_sub_confirm():
    """Mark a Ukrainian sublocal document as confirmed (status=4); echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.ukraine_city_sublocal.update_one({"_id": ObjectId(doc_id)}, {"$set": {"status": 4}})
    return doc_id
@app.route('/urk_translate_update', methods=['GET', 'POST'])
@login_required
def urk_translate_update():
    """Store an edited translation name for one language on a sublocal document."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['mongo_id']
    field = "translate." + request.form['lang'] + ".name"
    location.ukraine_city_sublocal.update_one({"_id": ObjectId(doc_id)},
                                              {"$set": {field: request.form['value']}})
    return doc_id
@app.route('/urk_translate_confirm', methods=['GET', 'POST'])
@login_required
def urk_translate_confirm():
    """Flag one language translation on a sublocal document as confirmed."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['mongo_id']
    field = "translate." + request.form['lang'] + ".status"
    location.ukraine_city_sublocal.update_one({"_id": ObjectId(doc_id)},
                                              {"$set": {field: 1}})
    return doc_id
@app.route('/urk_sub_confirm_delete', methods=['GET', 'POST'])
@login_required
def urk_sub_confirm_delete():
    """Clear the confirmed flag on a Ukrainian sublocal document; echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    # $unset ignores its value; 4 mirrors the matching $set route.
    location.ukraine_city_sublocal.update_one({"_id": ObjectId(doc_id)}, {"$unset": {"status": 4}})
    return doc_id
##############################################
# END UKRAINE
##############################################
@app.route('/matching/belarus')
@app.route('/matching/belarus/<string:region>')
@login_required
def matching_belarus(region=None, provincia=None):
    """Show the Belarus region menu, or the settlement list for one region."""
    mode = request.args.get('mode', 'none')  # read for parity with siblings; currently unused
    Provincia = {
        '1': 'Брестская',
        '2': 'Витебская',
        '3': 'Гомельская',
        '4': 'Гродненская',
        '5': 'Минская',
        '6': 'Могилевская',
        '7': 'Минск',
    }
    if region is None:
        return render_template('admin/belarus/region-list.html', data=Provincia)
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    rows = location.belarus.find({'NAMEDISTR': str(region)})
    return render_template('admin/belarus/list.html', com=0, data=rows)
##############################################
# SPAIN
##############################################
@app.route('/matching/spain/')
@app.route('/matching/spain/<string:region>/<string:provincia>')
@login_required
def matching_spain(region=None, provincia=None):
    """Show the Spanish province menu, or the matching list for one province.

    Without a region: renders the province index built from the INE-style
    two-digit codes below. With a region: lists internal documents whose
    `20_SNIG_COD_PROV` equals the numeric region code, together with the
    SNIG-type -> allowed-Google-type mapping used by the template.
    """
    mode = request.args.get('mode', 'none')  # read but currently unused
    # Province names keyed by zero-padded INE province code.
    Provincia = {
        '01' : 'Araba (Álava)',
        '02' : 'Abacente ',
        '03' : 'Alicante ',
        '04' : 'Almería ',
        '05' : 'Avila ',
        '06' : 'Badajoz ',
        '07' : 'Balears, Illes',
        '08' : 'Barcelona ',
        '09' : 'Burgos ',
        '10' : 'Cáceres ',
        '11' : 'Cádiz ',
        '12' : 'Castellón',
        '13' : 'Ciudad Real',
        '14' : 'Córdoba ',
        '15' : 'Coruña, A ',
        '16' : 'Cuenca ',
        '17' : 'Girona ',
        '18' : 'Granada ',
        '19' : 'Guadalajara ',
        '20' : 'Guipuzcoa ',
        '21' : 'Huelva ',
        '22' : 'Huesca ',
        '23' : 'Jaén ',
        '24' : 'León ',
        '25' : 'Lleida ',
        '26' : 'Rioja, La ',
        '27' : 'Lugo ',
        '28' : 'Madrid ',
        '29' : 'Málaga ',
        '30' : 'Murcia ',
        '31' : 'Navarra ',
        '32' : 'Ourense ',
        '33' : 'Asturias ',
        '34' : 'Palencia ',
        '35' : 'Las Palmas ',
        '36' : 'Pontevedra ',
        '37' : 'Salamanca ',
        '38' : 'Santa Cruz de Tenerife',
        '39' : 'Cantabria ',
        '40' : 'Segovia ',
        '41' : 'Sevilla ',
        '42' : 'Soria ',
        '43' : 'Tarragona ',
        '44' : 'Teruel ',
        '45' : 'Toledo ',
        '46' : 'Valencia ',
        '47' : 'Valladolid ',
        '48' : 'Bizkaia ',
        '49' : 'Zamora ',
        '50' : 'Zaragoza ',
        '51' : 'Ceuta ',
        '52' : 'Melilla ',
    }
    # Which Google place types are acceptable for each SNIG entity type.
    types = {"Municipio": ["administrative_area_level_4"],
    "Entidad colectiva" : ["administrative_area_level_5", "neighborhood"],
    "Otras entidades": ["locality", "neighborhood"],
    "Capital de municipio":["locality"],
    "Entidad singular": ["locality"]}
    if region is None:
        return render_template('admin/matching-spain/region-list.html', data=Provincia)
    else:
        config = Config('./config/config.yml')
        mongo_config = config.get('mongodb')
        connection = MongoClient(mongo_config['host'], mongo_config['port'])
        db = connection.location
        data = db.internal.find({'20_SNIG_COD_PROV': int(region)})
        return render_template('admin/matching-spain/list.html', region=Provincia[str(region)], com = 0, types=types, data=data)
@app.route('/sinoptik_db_confirm', methods=['GET', 'POST'])
@login_required
def sinoptik_db_confirm():
    """Mark a Spanish sinoptik row as confirmed (status=4); echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.spain_sql_sinoptik.update_one({"_id": ObjectId(doc_id)}, {"$set": {"status": 4}})
    return doc_id
@app.route('/sinoptik_db_delete-confirm', methods=['GET', 'POST'])
@login_required
def sinoptik_db_delete_status_confirm():
    """Clear the confirmed flag on a Spanish sinoptik row; echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.spain_sql_sinoptik.update_one({"_id": ObjectId(doc_id)}, {"$unset": {"status": 4}})
    return doc_id
@app.route('/sinoptik_db_reparse', methods=['GET', 'POST'])
@login_required
def sinoptik_db_reparse():
    """Re-link a sinoptik row to an internal (SNIG) document and compare coordinates.

    Expects form fields 'sinoptik_id' and 'parser_id'. Stores the computed
    distance and a Google Maps comparison URL on the sinoptik row, then
    returns the comparison data as JSON. Returns a 404 JSON payload when
    the parser document does not exist (previously the view fell through
    and returned None, which Flask turned into a 500).
    """
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.location
    row = db.spain_sql_sinoptik.find_one({"_id": ObjectId(request.form['sinoptik_id'])})
    parce_data = db.internal.find_one({"_id": ObjectId(request.form['parser_id'])})
    if parce_data is None:
        return json.dumps({"error": "parser document not found"}), 404
    comparison = getDistance(row['lat'], row['lng'], parce_data['28_SNIG_LATITUD_ETRS89'], parce_data['29_SNIG_LONGITUD_ETRS89'])
    comparison_url = ("https://www.google.com.ua/maps/dir/" + str(row['lat']) + "," + str(row['lng']) + "/" + str(parce_data['28_SNIG_LATITUD_ETRS89']) + "," + str(parce_data['29_SNIG_LONGITUD_ETRS89']))
    db.spain_sql_sinoptik.update_one(
        {"_id": row['_id']},
        {
            "$set": {
                "parser_id": parce_data['_id'],
                "comparison": comparison,
                "SNIG_NOMBRE": parce_data['24_SNIG_NOMBRE'],
                "comparison_url": comparison_url,
            }
        }
    )
    # Anything closer than 2 km counts as a plausible match.
    status = comparison < 2
    responce = {
        "comparison": comparison,
        "SNIG_NOMBRE": parce_data['24_SNIG_NOMBRE'],
        "comparison_url": comparison_url,
        "comparison_status": status,
    }
    return json.dumps(responce)
@app.route('/matching-spain-update_snig', methods=['GET', 'POST'])
@login_required
def update_status_snig():
    """Set status_snig=1 on an internal document; echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.internal.update_one({"_id": ObjectId(doc_id)}, {"$set": {"status_snig": 1}})
    return doc_id
@app.route('/matching-spain-delete-confirm_snig', methods=['GET', 'POST'])
@login_required
def delete_status_confirm_snig():
    """Remove the status_snig flag from an internal document; echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.internal.update_one({"_id": ObjectId(doc_id)}, {"$unset": {"status_snig": 1}})
    return doc_id
@app.route('/matching-spain-update', methods=['GET', 'POST'])
@login_required
def update_status():
    """Mark an internal document as confirmed (status=4); echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.internal.update_one({"_id": ObjectId(doc_id)}, {"$set": {"status": 4}})
    return doc_id
@app.route('/matching-spain-delete-confirm', methods=['GET', 'POST'])
@login_required
def delete_status_confirm():
    """Clear the confirmed flag on an internal document; echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.internal.update_one({"_id": ObjectId(doc_id)}, {"$unset": {"status": 4}})
    return doc_id
def getDistance(lat1, lon1, lat2, lon2):
    """Return the route distance in km between two coordinates, rounded to 2 dp.

    Queries the Google Distance Matrix API with a key from Keygen. Returns 0
    when the response contains no distance (no route, quota error, malformed
    payload). Changes vs. the original: the leftover debug print is removed
    and the bare-broad `except Exception` is narrowed to the lookup errors
    the response parsing can actually raise.
    """
    key = Keygen()
    url = ('https://maps.googleapis.com/maps/api/distancematrix/json'
           '?units=imperial&origins=' + str(lat1) + ',' + str(lon1) +
           '&destinations=' + str(lat2) + ',' + str(lon2) +
           '&key=' + str(key.get_key_distance()))
    response = requests.get(url)
    data = response.json()
    try:
        resp = data['rows'][0]['elements'][0]['distance']['value'] / 1000
    except (KeyError, IndexError, TypeError):
        # Missing 'distance' element (e.g. status != OK) — treat as unknown.
        resp = 0
    return round(resp, 2)
@app.route('/spain-reparse_by_geocode', methods=['GET', 'POST'])
@login_required
def reparse_by_geocode():
    """Re-geocode one internal (SNIG) document via Google Places and store the result.

    Two modes, selected by the 'type' form field:
      * 'autocomplete' — return raw Google Places suggestions for the
        document's name as JSON.
      * otherwise — resolve the given 'place_id', compare the Google result
        against the SNIG name/coordinates/type, persist the gmap_* fields on
        the internal document and return a JSON comparison summary.
    """
    # return render_template('admin/gmap/list.html', country=country)
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    # use_cache = bool(get.get('use_cache', True))
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.location
    doc = db.internal.find_one({"_id" : ObjectId(request.form['id']) })
    doc_factory = DocFactory(config.get('mongodb'))
    Key = Keygen()
    keyAPI = Key.get_key_place()
    if not keyAPI:
        # NOTE(review): sys.exit() inside a request handler kills the whole
        # worker process, not just this request — confirm and replace with
        # an HTTP error response.
        sys.exit()
    # Inject the Places key into the shared config before building the loader.
    cnf = {'googlemaps':{'geocoding':{'key': keyAPI}}}
    config.set(cnf)
    gmap_config = config.get('googlemaps')
    # gmap_config.update(language=language)
    language = 'es'
    gmap_loader = LoaderFactory.loader_gmaps_with_cache(gmaps_config=gmap_config,
        storage_config=config.get('mongodb'))
    spider = Spider(
        loader_factory=LoaderFactory,
        gmap_parser=MapFactory.spain,
        doc_factory=doc_factory,
        language=language,
        config=config,
        use_cache=True
    )
    if request.form['type'] == "autocomplete":
        # Places text search on the document's articled name, scoped to Spain.
        raw = gmap_loader.by_places(doc['08_INE_Name_w_Article'] + ', España')
        return json.dumps(raw)
    else:
        objects = spider.get_gmap_place_id(request.form['place_id'])
        gmap = {}
        gmap = objects[0].get_document()
        # Name comparison is case- and surrounding-whitespace-insensitive.
        try:
            if gmap['name'].lower().lstrip().strip() == doc['08_INE_Name_w_Article'].lower().lstrip().strip():
                gmap['comparison'] = True
            else:
                gmap['comparison'] = False
        except Exception as e:
            # Missing name on either side — treat as non-matching.
            gmap['comparison'] = False
        gmap['15_GMap_center_SNIG_comparison'] = getDistance(gmap['center']['lat'], gmap['center']['lng'],doc['28_SNIG_LATITUD_ETRS89'],doc['29_SNIG_LONGITUD_ETRS89'])
        gmap['15_gmap_comparison_url'] =("https://www.google.com.ua/maps/dir/"+str(gmap['center']['lat'])+","+str(gmap['center']['lng'])+"/"+str(doc['28_SNIG_LATITUD_ETRS89'])+","+str(doc['29_SNIG_LONGITUD_ETRS89'])+"")
        # Persist the Google result alongside the SNIG data.
        db.internal.update_one(
            {"_id": ObjectId(request.form['id']) },
            {
                "$set": {
                    "10_gmap_name": gmap.get('name'),
                    "17_gmap_admin_hierarchy": gmap.get('admin_hierarchy', {}),
                    "gmap_center": gmap.get('center'),
                    "gmap_bounds": gmap.get('bounds'),
                    "12_gmap_type": gmap.get('type'),
                    "15_GMap_center_SNIG_comparison": gmap.get('15_GMap_center_SNIG_comparison'),
                    "15_gmap_comparison_url": gmap.get('15_gmap_comparison_url'),
                    "11_gmap_comparison" : gmap['comparison']
                }
            }
        )
        # Drop the Mongo _id so the dict is JSON-serializable below.
        gmap.pop('_id')
        # gmap['15_GMap_center_SNIG_comparison'] = getDistance(gmap['center']['lat'], gmap['center']['lng'],doc['28_SNIG_LATITUD_ETRS89'],doc['29_SNIG_LONGITUD_ETRS89'])
        # Centers within 1 km count as matching.
        if gmap['15_GMap_center_SNIG_comparison'] <= 1:
            gm_comp_status = True
        else:
            gm_comp_status = False
        # Acceptable Google place types per SNIG entity type.
        types = {"Municipio": ["administrative_area_level_4"],
        "Entidad colectiva" : ["administrative_area_level_5", "neighborhood"],
        "Otras entidades": ["locality", "neighborhood"],
        "Capital de municipio":["locality"],
        "Entidad singular": ["locality"]}
        if gmap.get('type') in types[doc['25_SNIG_TIPO']]:
            gm_type_status = True
        else:
            gm_type_status = False
        raw = {
            "gmap_name": gmap.get('name'),
            "gmap_name_status" : gmap['comparison'],
            "gmap_type": gmap.get('type'),
            "15_GMap_center_SNIG_comparison": gmap.get('15_GMap_center_SNIG_comparison'),
            "15_gmap_comparison_url": gmap.get('15_gmap_comparison_url'),
            "gmap_comp_status":gm_comp_status,
            "gmap_type_status":gm_type_status,
        }
        return json.dumps(raw)
@app.route('/sinoptik_db/spain', methods=['GET', 'POST'])
@login_required
def sinoptik_db():
    """Render the Spanish sinoptik reference table."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    rows = location.spain_sql_sinoptik.find()
    return render_template('admin/matching-spain/sinoptik_db.html', data=rows)
##############################################
# END SPAIN
##############################################
@app.route('/sinoptik_db/romania', methods=['GET', 'POST'])
@login_required
def sinoptik_db_romania():
    """Render the Romanian sinoptik reference table.

    Passes two lookup tables to the template: `region` maps sinoptik county
    ids to the development-region code used by /matching/romania, and
    `tip_name` maps SIRUTA TIP codes to human-readable entity labels.
    """
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.location
    data_db = db.sinoplik_romania.find()
    # County id -> development-region code (1..8, matching matching_romania).
    region = {
        "2502":"8",
        "2460":"8",
        "2495":"7",
        "2466":"7",
        "2497":"7",
        "2475":"7",
        "2474":"7",
        "2501":"7",
        "2471":"1",
        "2478":"1",
        "2491":"1",
        "2483":"1",
        "2481":"1",
        "2467":"1",
        "2488":"6",
        "2470":"6",
        "2461":"6",
        "2476":"6",
        "2479":"6",
        "2496":"6",
        "2473":"3",
        "2494":"3",
        "2487":"3",
        "2492":"3",
        "2468":"3",
        "2500":"3",
        "2499":"3",
        "2469":"2",
        "2477":"2",
        "2482":"2",
        "2465":"2",
        "2498":"2",
        "2464":"2",
        "2486":"2",
        "2480":"4",
        "2485":"4",
        "2463":"4",
        "2484":"4",
        "2490":"4",
        "2472":"5",
        "2489":"5",
        "2462":"5",
        "2493":"5",
    }
    # data = list()
    # array = dict()
    # for item in data_db:
    #     # array['sinoptik_db'] = item
    #     # print (item['title'])
    #     if 'parser_id' in item:
    #         ntype = db.internal.find_one({"_id" : item['parser_id']})
    #         data_db['type_snig'] = ntype["25_SNIG_TIPO"]
    #     data.append(array)
    # SIRUTA TIP code -> entity-type label.
    TIP_Name ={
        "40":"Judet_admin",
        "1":"Municipiu_admin_resedinta_judet",
        "2":"Oras_admin",
        "3":"Comuna_admin",
        "4":"Municipiu_admin",
        "5":"Oras_admin_resedinta_judet",
        "6":"Sectoarele_Bucuresti",
        "9":"Town_resedinta_municipiu",
        "10":"Town_municipiu",
        "11":"Willage_municipiu",
        "17":"Town_resedinta_orasului",
        "18":"Town_orasului",
        "19":"Willage_orasului",
        "22":"Willage_resedinta_comuna",
        "23":"Willage_comuna",
    }
    return render_template('admin/romania/sinoptik_db.html', data=data_db, tip_name=TIP_Name, region=region)
@app.route('/romania_sindb_other', methods=['GET', 'POST'])
@login_required
def romania_sindb_other():
    """Return candidate Romanian localities matching the posted sinoptik name.

    Responds with a JSON list of {"id", "name"} pairs for every `romania`
    document whose DENLOC equals the posted 'sinoptik_name' and whose
    gmap_type is "locality".

    Bug fix: the original created ONE dict outside the loop, mutated it with
    .update() and appended the same reference on every iteration, so every
    element of the returned list was the last match. A fresh dict is now
    built per document. The stray debug print is also removed.
    """
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.location
    matches = db.romania.find({"DENLOC": request.form['sinoptik_name'], "gmap_type": "locality"})
    rezult = [{"id": str(x['_id']), "name": x['DENLOC']} for x in matches]
    return json.dumps(rezult)
@app.route('/sinoptik_db_confirm_romania', methods=['GET', 'POST'])
@login_required
def sinoptik_db_confirm_romania():
    """Mark a Romanian sinoptik row as confirmed (status=4); echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.sinoplik_romania.update_one({"_id": ObjectId(doc_id)}, {"$set": {"status": 4}})
    return doc_id
@app.route('/sinoptik_db_delete-confirm_romania', methods=['GET', 'POST'])
@login_required
def sinoptik_db_delete_status_confirm_romania():
    """Clear the confirmed flag on a Romanian sinoptik row; echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.sinoplik_romania.update_one({"_id": ObjectId(doc_id)}, {"$unset": {"status": 4}})
    return doc_id
@app.route('/sinoptik_db_reparse_romania', methods=['GET', 'POST'])
@login_required
def sinoptik_db_reparse_romania():
    """Re-link a Romanian sinoptik row to a `romania` document and compare coordinates.

    Fixes vs. original:
      * adds @login_required — every sibling admin route has it; this one
        was left unprotected, which looks like an oversight;
      * returns an explicit 404 JSON when 'parser_id' does not resolve
        (previously the view returned None -> HTTP 500).

    When the target document has wiki coordinates, the distance and a Google
    Maps comparison URL are computed and stored; otherwise comparison is
    recorded as False with a placeholder URL.
    """
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.location
    row = db.sinoplik_romania.find_one({"_id": ObjectId(request.form['sinoptik_id'])})
    parce_data = db.romania.find_one({"_id": ObjectId(request.form['parser_id'])})
    if parce_data is None:
        return json.dumps({"error": "parser document not found"}), 404
    if 'wiki_center' in parce_data:
        comparison = getDistance(row['lat'], row['lng'], parce_data["wiki_center"]["lat"], parce_data["wiki_center"]["lng"])
        comparison_url = ("https://www.google.com.ua/maps/dir/" + str(row['lat']) + "," + str(row['lng']) + "/" + str(parce_data["wiki_center"]["lat"]) + "," + str(parce_data["wiki_center"]["lng"]))
    else:
        # No wiki coordinates to compare against.
        comparison = False
        comparison_url = '#'
    db.sinoplik_romania.update_one(
        {"_id": row['_id']},
        {
            "$set": {
                "parser_id": parce_data['_id'],
                "comparison": comparison,
                "DENLOC": parce_data['DENLOC'],
                "comparison_url": comparison_url,
            }
        }
    )
    # Distances of 2 km or more are flagged as mismatches; a False
    # comparison (no wiki center) keeps status True, as before.
    status = True
    if comparison >= 2:
        status = False
    responce = {
        "comparison": comparison,
        "SNIG_NOMBRE": parce_data['DENLOC'],
        "comparison_url": comparison_url,
        "comparison_status": status,
    }
    return json.dumps(responce)
@app.route('/matching/romania')
@app.route('/matching/romania/<string:region>')
@login_required
def matching_romania(region=None):
    """Show the Romanian region menu, or the matching list for one region.

    Without a region: renders the index of the eight development regions.
    With a region: lists `romania` documents whose REGIUNE matches, plus the
    TIP-code label table and the TIP -> allowed-Google-type mapping for the
    template.
    """
    mode = request.args.get('mode', 'none')  # read but currently unused
    # Development-region code -> name.
    Provincia = {
        '1' : 'NORD-EST',
        '2' : 'SUD-EST',
        '3' : 'SUD - MUNTENIA',
        '4' : 'SUD-VEST - OLTENIA',
        '5' : 'VEST',
        '6' : 'NORD-VEST',
        '7' : 'CENTRU',
        '8' : 'BUCURESTI - ILFOV',
    }
    if region is None:
        return render_template('admin/romania/region-list.html', data=Provincia)
    else:
        # SIRUTA TIP code -> entity-type label.
        TIP_Name ={
            "40":"Judet_admin",
            "1":"Municipiu_admin_resedinta_judet",
            "2":"Oras_admin",
            "3":"Comuna_admin",
            "4":"Municipiu_admin",
            "5":"Oras_admin_resedinta_judet",
            "6":"Sectoarele_Bucuresti",
            "9":"Town_resedinta_municipiu",
            "10":"Town_municipiu",
            "11":"Willage_municipiu",
            "17":"Town_resedinta_orasului",
            "18":"Town_orasului",
            "19":"Willage_orasului",
            "22":"Willage_resedinta_comuna",
            "23":"Willage_comuna",
        }
        # TIP code -> acceptable Google place types for a match.
        types = {
            "40": ["administrative_area_level_1"],
            "1": ["administrative_area_level_2"],
            "2": ["administrative_area_level_2"],
            "3": ["administrative_area_level_2"],
            "4": ["administrative_area_level_2"],
            "5": ["administrative_area_level_2"],
            "6": ["sublocality_level_1"],
            "9": ["locality","sublocality_level_1"],
            "10": ["locality","sublocality_level_1"],
            "11": ["locality","sublocality_level_1"],
            "17": ["locality","sublocality_level_1"],
            "18": ["locality","sublocality_level_1"],
            "19": ["locality","sublocality_level_1"],
            "22": ["locality","sublocality_level_1"],
            "23": ["locality","sublocality_level_1"]
        }
        config = Config('./config/config.yml')
        mongo_config = config.get('mongodb')
        connection = MongoClient(mongo_config['host'], mongo_config['port'])
        db = connection.location
        data = db.romania.find({'REGIUNE': int(region)})
        return render_template('admin/romania/list.html', region=Provincia[str(region)], types=types, tip_name=TIP_Name, com = 0, data=data)
@app.route('/romania-reparse_wiki', methods=['GET', 'POST'])
@login_required
def romania_reparse_wik():
    """Re-parse a Romanian Wikipedia page and store its data on a `romania` doc.

    Security fix: the original stacked @login_required ABOVE @app.route.
    Decorators apply bottom-up, so the route was registered with the
    unwrapped function and the login check was silently bypassed;
    @app.route must be outermost.

    Expects form fields 'wiki_url' and 'wiki_mongo' (document ObjectId).
    Stores wiki_name / wiki_admin_hierarchy / wiki_center / wiki_url /
    wiki_postal_code; when the document already has a gmap_center, also
    stores gmap_wiki_distance and returns the distance plus a Google Maps
    comparison URL. Responds with a JSON summary.
    """
    from lib.parser.wiki.romania import Romania as WikiRo
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    conn = MongoClient(mongo_config['host'], mongo_config['port'])
    db = conn.location
    doc_factory = DocFactory(config.get('mongodb'))
    loader = Loader.loader_with_mongodb(config.get('mongodb'))
    headers = {'User-Agent': 'Mozilla/5.0'}
    # Percent-encode the URL while leaving the scheme/path separators intact.
    wiki_url_r = urllib.parse.quote(request.form['wiki_url'], safe=":/")
    # NOTE(review): result unused — kept in case doc_factory.wiki() has side
    # effects (e.g. registering the URL); confirm and drop.
    doc = doc_factory.wiki(wiki_url_r)
    page, code = loader.load(wiki_url_r, headers=headers)
    data = WikiRo(page).as_dictionary()
    doc_mongo = db.romania.find_one({"_id": ObjectId(request.form['wiki_mongo'])})
    if 'gmap_center' in doc_mongo:
        center_gm = doc_mongo['gmap_center']
        center_wiki = data.get('center')
        gmap_comparison_url = ("https://www.google.com.ua/maps/dir/" + str(center_gm['lat']) + "," + str(center_gm['lng']) + "/" + str(center_wiki["lat"]) + "," + str(center_wiki["lng"]))
        distance = getDistance(center_gm["lat"], center_gm["lng"], center_wiki["lat"], center_wiki["lng"])
        db.romania.update_one(
            {"_id": ObjectId(request.form['wiki_mongo'])},
            {
                "$set": {
                    'wiki_name': data.get('name'),
                    'wiki_admin_hierarchy': data.get('admin_hierarchy', {}),
                    'wiki_center': data.get('center'),
                    'wiki_url': str(request.form['wiki_url']),
                    'gmap_wiki_distance': distance,
                    'wiki_postal_code': data.get('postal_codes'),
                }
            }
        )
        # Centers within 2 km count as matching.
        distance_status = distance < 2
    else:
        # No Google center stored yet: save the wiki data without a distance.
        distance_status = False
        distance = False
        gmap_comparison_url = '#'
        db.romania.update_one(
            {"_id": ObjectId(request.form['wiki_mongo'])},
            {
                "$set": {
                    'wiki_name': data.get('name'),
                    'wiki_admin_hierarchy': data.get('admin_hierarchy', {}),
                    'wiki_center': data.get('center'),
                    'wiki_url': str(request.form['wiki_url']),
                    'wiki_postal_code': data.get('postal_codes'),
                }
            }
        )
    resp = {
        "distance_status": distance_status,
        "distance": distance,
        "gmap_comparison_url": gmap_comparison_url,
        'wiki_name': data.get('name'),
        'wiki_center': data.get('center'),
    }
    return json.dumps(resp)
@app.context_processor
def utility_processor():
    """Expose `autocomplete` to templates: flag a Romania doc as auto-confirmed."""
    def autocomplete(pid):
        mongo_cfg = Config('./config/config.yml').get('mongodb')
        location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
        location.romania.update_one({"_id": ObjectId(pid)}, {"$set": {"status_autoconfirm": 1}})
        # Templates render the return value, so yield an empty string.
        return str('')
    return dict(autocomplete=autocomplete)
@app.context_processor
def dublicate():
    """Expose `isdub` to templates: 'True' when >1 internal rows share coordinates."""
    def isdub(lat, lng):
        mongo_cfg = Config('./config/config.yml').get('mongodb')
        location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
        matches = location.internal.find({"28_SNIG_LATITUD_ETRS89": lat,
                                          "29_SNIG_LONGITUD_ETRS89": lng}).count()
        # The template compares against the literal strings, not booleans.
        return "True" if matches > 1 else "False"
    return dict(isdub=isdub)
@app.route('/matching-romania-confirm', methods=['GET', 'POST'])
@login_required
def romania_confirm():
    """Set status_snig=1 on a Romania document; echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.romania.update_one({"_id": ObjectId(doc_id)}, {"$set": {"status_snig": 1}})
    return doc_id
@app.route('/matching-romania-confirm_ins', methods=['GET', 'POST'])
@login_required
def romania_confirm_ins():
    """Set status_ins=1 on a Romania document; echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.romania.update_one({"_id": ObjectId(doc_id)}, {"$set": {"status_ins": 1}})
    return doc_id
@login_required
def romania_confirm():
    """Set status_autoconfirm=1 on a Romania document and echo its id.

    NOTE(review): dead code — this redefines `romania_confirm` (already
    routed above under /matching-romania-confirm) and carries no @app.route
    of its own, so it is never registered as a view; it only shadows the
    earlier function name at module level. Presumably a copy/paste leftover
    (it sets status_autoconfirm instead of status_snig) — confirm whether it
    should get its own URL or be removed.
    """
    # return render_template('admin/gmap/list.html', country=country)
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.location
    db.romania.update_one({"_id" : ObjectId(request.form['id']) },{"$set" : {"status_autoconfirm":1}})
    return request.form['id']
@app.route('/matching-romania-confirm_del_ins', methods=['GET', 'POST'])
@login_required
def romania_confirm_del():
# return render_template('admin/gmap/list.html', country=country)
config = Config('./config/config.yml')
mongo_config = config.get('mongodb')
connection = MongoClient(mongo_config['host'], mongo_config['port'])
db = connection.location
db.romania.update_one({"_id" : ObjectId(request.form['id']) },{"$unset" : {"status_snig":1}})
return request.form['id']
#######################################################
@app.route('/belarus_st')
@login_required
def belarus_st(region=None, provincia=None):
    """List all belarus_st documents."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    rows = location.belarus_st.find()
    return render_template('admin/belarus/list_st.html', com=0, data=rows)
@app.route('/belarus_ootp')
@login_required
def belarus_belarus_oopt(region=None, provincia=None):
    """List all belarus_oopt (protected areas) documents."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    rows = location.belarus_oopt.find()
    return render_template('admin/belarus/list_ootp.html', com=0, data=rows)
@app.route('/bl_st_confirm', methods=['GET', 'POST'])
@login_required
def belarus_st_confirm():
    """Set status_snig=1 on a belarus_st document; echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.belarus_st.update_one({"_id": ObjectId(doc_id)}, {"$set": {"status_snig": 1}})
    return doc_id
@app.route('/bl_st_confirm_delete', methods=['GET', 'POST'])
@login_required
def belarus_ST_confirm_del():
    """Remove the status_snig flag from a belarus_st document; echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.belarus_st.update_one({"_id": ObjectId(doc_id)}, {"$unset": {"status_snig": 1}})
    return doc_id
@app.route('/bl_ootp_confirm', methods=['GET', 'POST'])
@login_required
def belarus_ootp_confirm():
    """Set status_snig=1 on a belarus_oopt document; echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.belarus_oopt.update_one({"_id": ObjectId(doc_id)}, {"$set": {"status_snig": 1}})
    return doc_id
@app.route('/bl_ootp_confirm_delete', methods=['GET', 'POST'])
@login_required
def belarus_ootp_confirm_del():
    """Remove the status_snig flag from a belarus_oopt document; echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.belarus_oopt.update_one({"_id": ObjectId(doc_id)}, {"$unset": {"status_snig": 1}})
    return doc_id
@app.route('/bl_st_confirm_center', methods=['GET', 'POST'])
@login_required
def belarus_st_confirm_center():
    """Set status_center=1 on a belarus_st document; echo its id."""
    mongo_cfg = Config('./config/config.yml').get('mongodb')
    location = MongoClient(mongo_cfg['host'], mongo_cfg['port']).location
    doc_id = request.form['id']
    location.belarus_st.update_one({"_id": ObjectId(doc_id)}, {"$set": {"status_center": 1}})
    return doc_id
@app.route('/bl_st_confirm_delete_center', methods=['GET', 'POST'])
@login_required
def belarus_ST_confirm_del_center():
    """Clear the ``status_center`` flag on a belarus_st record; echo its id."""
    record_id = request.form['id']
    mongo_conf = Config('./config/config.yml').get('mongodb')
    client = MongoClient(mongo_conf['host'], mongo_conf['port'])
    client.location.belarus_st.update_one(
        {"_id": ObjectId(record_id)}, {"$unset": {"status_center": 1}})
    return record_id
@app.route('/matching-belarus-confirm', methods=['GET', 'POST'])
@login_required
def belarus_confirm():
    """Set the ``status_snig`` flag on a belarus record; echo its id."""
    record_id = request.form['id']
    mongo_conf = Config('./config/config.yml').get('mongodb')
    client = MongoClient(mongo_conf['host'], mongo_conf['port'])
    client.location.belarus.update_one(
        {"_id": ObjectId(record_id)}, {"$set": {"status_snig": 1}})
    return record_id
@app.route('/belarus_my_coordinate', methods=['GET', 'POST'])
@login_required
def belarus_my_coordinate():
    """Store a manually picked "lat,lng" pair on a belarus record.

    Returns a JSON echo of the id plus the two coordinate parts.
    """
    record_id = request.form['id']
    parts = request.form['coordinate'].split(",")
    mongo_conf = Config('./config/config.yml').get('mongodb')
    client = MongoClient(mongo_conf['host'], mongo_conf['port'])
    client.location.belarus.update_one(
        {"_id": ObjectId(record_id)},
        {"$set": {"lat": str(parts[0]), "lng": str(parts[1])}})
    return json.dumps({'id': record_id, "XCoord": parts[0], "YCoord": parts[1]})
@app.route('/belarus_ootp_my_coordinate', methods=['GET', 'POST'])
@login_required
def belarus_ootp_my_coordinate():
    """Save a manually entered name override (``nawname``) on a belarus_oopt record."""
    record_id = request.form['id']
    mongo_conf = Config('./config/config.yml').get('mongodb')
    client = MongoClient(mongo_conf['host'], mongo_conf['port'])
    client.location.belarus_oopt.update_one(
        {"_id": ObjectId(record_id)},
        {"$set": {"nawname": request.form['nawname']}})
    return json.dumps({'id': record_id})
@app.route('/belarus_st_my_coordinate', methods=['GET', 'POST'])
@login_required
def belarus_st_my_coordinate():
    """Save a manually entered my_lat/my_lng pair on a belarus_st record."""
    record_id = request.form['id']
    mongo_conf = Config('./config/config.yml').get('mongodb')
    client = MongoClient(mongo_conf['host'], mongo_conf['port'])
    client.location.belarus_st.update_one(
        {"_id": ObjectId(record_id)},
        {"$set": {"my_lat": request.form['my_lat'], "my_lng": request.form['my_lng']}})
    return json.dumps({'id': record_id})
@app.route('/matching-belarus-confirm_del', methods=['GET', 'POST'])
@login_required
def belarus_confirm_del():
    """Clear the ``status_snig`` flag on a belarus record; echo its id."""
    record_id = request.form['id']
    mongo_conf = Config('./config/config.yml').get('mongodb')
    client = MongoClient(mongo_conf['host'], mongo_conf['port'])
    client.location.belarus.update_one(
        {"_id": ObjectId(record_id)}, {"$unset": {"status_snig": 1}})
    return record_id
@app.route('/matching-belarus-confirm_ins', methods=['GET', 'POST'])
@login_required
def belarus_confirm_ins():
    """Set the ``status_ins`` flag on a belarus record; echo its id."""
    record_id = request.form['id']
    mongo_conf = Config('./config/config.yml').get('mongodb')
    client = MongoClient(mongo_conf['host'], mongo_conf['port'])
    client.location.belarus.update_one(
        {"_id": ObjectId(record_id)}, {"$set": {"status_ins": 1}})
    return record_id
@app.route('/matching-belarus-confirm_del_ins', methods=['GET', 'POST'])
@login_required
def belarus_confirm_ins_del():
    """Clear the ``status_ins`` flag on a belarus record; echo its id."""
    record_id = request.form['id']
    mongo_conf = Config('./config/config.yml').get('mongodb')
    client = MongoClient(mongo_conf['host'], mongo_conf['port'])
    client.location.belarus.update_one(
        {"_id": ObjectId(record_id)}, {"$unset": {"status_ins": 1}})
    return record_id
####################################################################################################
@app.route('/romania-reparse_by_geocode', methods=['GET', 'POST'])
@login_required
def romania_reparse_by_geocode():
    """Re-geocode a single Romanian locality record via the Google Maps API.

    POST form fields:
        id       -- ObjectId of the ``romania`` document to refresh.
        type     -- "autocomplete" returns raw place suggestions; anything
                    else resolves ``place_id`` into a full place document.
        place_id -- Google place id (used when type != "autocomplete").

    Returns a JSON string: either the raw autocomplete payload, or a summary
    of the re-parsed place plus comparison flags against the stored record.
    """
    # Strip Romanian diacritics (both comma-below and cedilla variants)
    # before comparing names.
    _ro_table = str.maketrans("ăãâîșşțţ", "aaaisstt")

    def _norm(s):
        return s.lower().translate(_ro_table)

    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.location
    doc = db.romania.find_one({"_id": ObjectId(request.form['id'])})
    doc_factory = DocFactory(config.get('mongodb'))

    # Obtain a usable Places API key; without one there is nothing to do.
    # NOTE(review): sys.exit() kills the whole worker process — an HTTP 5xx
    # would be friendlier, but callers may rely on this today.
    keyAPI = Keygen().get_key_place()
    if not keyAPI:
        sys.exit()
    config.set({'googlemaps': {'geocoding': {'key': keyAPI}}})
    gmap_config = config.get('googlemaps')

    language = 'ro'
    gmap_loader = LoaderFactory.loader_gmaps_with_cache(
        gmaps_config=gmap_config, storage_config=config.get('mongodb'))
    spider = Spider(
        loader_factory=LoaderFactory,
        # presumably the Spain parser fits RO place payloads — TODO confirm
        gmap_parser=MapFactory.spain,
        doc_factory=doc_factory,
        language=language,
        config=config,
        use_cache=True
    )

    if request.form['type'] == "autocomplete":
        # Return the raw Google place suggestions for the locality name.
        raw = gmap_loader.by_places(doc['DENLOC'] + ', România')
        return json.dumps(raw)

    objects = spider.get_gmap_place_id(request.form['place_id'])
    gmap = objects[0].get_document()

    # Compare names with diacritics stripped; any lookup failure counts as
    # "names differ" rather than an error.
    try:
        gmap['comparison'] = _norm(gmap['name']) == _norm(doc['DENLOC'])
    except Exception:
        gmap['comparison'] = False

    # Postal-code match flag.
    pcs = 'postal_code' in gmap and gmap.get('postal_code') == doc['CODP']

    if 'wiki_center' in doc:
        distance = getDistance(gmap['center']['lat'], gmap['center']['lng'],
                               doc["wiki_center"]["lat"], doc["wiki_center"]["lng"])
        gmap['gmap_comparison_url'] = (
            "https://www.google.com.ua/maps/dir/"
            + str(gmap['center']['lat']) + "," + str(gmap['center']['lng'])
            + "/" + str(doc["wiki_center"]["lat"]) + "," + str(doc["wiki_center"]["lng"]))
    else:
        # Sentinel: no reference coordinates available for this record.
        distance = False
        gmap['gmap_comparison_url'] = False

    db.romania.update_one(
        {"_id": ObjectId(request.form['id'])},
        {
            "$set": {
                'gmap_name': gmap.get('name'),
                'gmap_admin_hierarchy': gmap.get('admin_hierarchy', {}),
                'gmap_center': gmap.get('center'),
                'gmap_bounds': gmap.get('bounds'),
                'gmap_type': gmap.get('type'),
                'gmap_translate': gmap.get('translate'),
                'gmap_requests': gmap.get('requests'),
                'gmap_code': gmap.get('code'),
                'gmap_postal_code': gmap.get('postal_code'),
                'gmap_wiki_distance': distance
            }
        }
    )

    # BUGFIX: previously ``distance < 2`` was also True when distance was the
    # sentinel False (no wiki_center), wrongly reporting a close match.
    distance_status = distance is not False and distance < 2

    # Map the record's TIP code onto the Google place types considered valid.
    types = {
        "40": ["administrative_area_level_1"],
        "1": ["administrative_area_level_2"],
        "2": ["administrative_area_level_2"],
        "3": ["administrative_area_level_2"],
        "4": ["administrative_area_level_2"],
        "5": ["administrative_area_level_2"],
        "6": ["sublocality_level_1"],
        "9": ["locality", "sublocality_level_1"],
        "10": ["locality", "sublocality_level_1"],
        "11": ["locality", "sublocality_level_1"],
        "17": ["locality", "sublocality_level_1"],
        "18": ["locality", "sublocality_level_1"],
        "19": ["locality", "sublocality_level_1"],
        "22": ["locality", "sublocality_level_1"],
        "23": ["locality", "sublocality_level_1"]
    }
    # NOTE(review): an unknown TIP code raises KeyError here — unchanged.
    gmap_type_status = gmap.get('type') in types[str(doc['TIP'])]

    return json.dumps({
        "gmap_name": gmap.get('name'),
        "gmap_name_status": gmap['comparison'],
        "gmap_type": gmap.get('type'),
        "gmap_type_status": gmap_type_status,
        "gmap_postal_code": gmap.get('postal_code'),
        "pcs": pcs,
        "distance": distance,
        "distance_status": distance_status,
        "gmap_comparison_url": gmap['gmap_comparison_url'],
    })
@app.route('/belarus-st_by_geocode', methods=['GET', 'POST'])
@login_required
def belarus_st_by_geocode():
    """Re-geocode a single ``belarus_st`` record via the Google Maps API.

    POST form fields: ``id`` (ObjectId), ``type`` ("autocomplete" for raw
    suggestions, anything else resolves ``place_id``), ``place_id``.
    Returns a JSON string with either the raw suggestions or a summary of
    the parsed place plus comparison flags against the stored record.
    """
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.location
    doc = db.belarus_st.find_one({"_id": ObjectId(request.form['id'])})
    doc_factory = DocFactory(config.get('mongodb'))

    # Obtain a usable Places API key; bail out hard when none is left.
    keyAPI = Keygen().get_key_place()
    if not keyAPI:
        sys.exit()
    config.set({'googlemaps': {'geocoding': {'key': keyAPI}}})
    gmap_config = config.get('googlemaps')

    language = 'ru'
    gmap_loader = LoaderFactory.loader_gmaps_with_cache(
        gmaps_config=gmap_config, storage_config=config.get('mongodb'))
    spider = Spider(
        loader_factory=LoaderFactory,
        gmap_parser=MapFactory.spain,
        doc_factory=doc_factory,
        language=language,
        config=config,
        use_cache=True
    )

    if request.form['type'] == "autocomplete":
        # Qualify the query with the selsoviet name when the record has one.
        if 'NAMESELSOVET' in doc:
            raw = gmap_loader.by_places(doc['NAMEOBJECT'] + ', ' + str(doc['NAMESELSOVET']))
        else:
            raw = gmap_loader.by_places(str(doc['NAMEOBJECT']))
        return json.dumps(raw)

    objects = spider.get_gmap_place_id(request.form['place_id'])
    gmap = objects[0].get_document()

    # Exact name comparison; any lookup failure counts as "names differ".
    try:
        gmap['comparison'] = gmap['name'] == doc['NAMEOBJECT']
    except Exception:
        gmap['comparison'] = False

    if 'XCoord' in doc:
        distance = getDistance(gmap['center']['lat'], gmap['center']['lng'],
                               doc["XCoord"], doc["YCoord"])
        gmap['gmap_comparison_url'] = (
            "https://www.google.com.ua/maps/dir/"
            + str(gmap['center']['lat']) + "," + str(gmap['center']['lng'])
            + "/" + str(doc["XCoord"]) + "," + str(doc["YCoord"]))
    else:
        # Sentinel: no reference coordinates for this record.
        distance = False
        gmap['gmap_comparison_url'] = False

    db.belarus_st.update_one(
        {"_id": ObjectId(request.form['id'])},
        {
            "$set": {
                'gmap_name': gmap.get('name'),
                'gmap_admin_hierarchy': gmap.get('admin_hierarchy', {}),
                'gmap_center': gmap.get('center'),
                'gmap_bounds': gmap.get('bounds'),
                'gmap_type': gmap.get('type'),
                'gmap_translate': gmap.get('translate'),
                'gmap_requests': gmap.get('requests'),
                'gmap_code': gmap.get('code'),
                'gmap_postal_code': gmap.get('postal_code'),
                'gmap_st_distance': distance
            }
        }
    )

    # BUGFIX: previously ``distance < 2`` was also True when distance was the
    # sentinel False (no coordinates), wrongly reporting a close match.
    distance_status = distance is not False and distance < 2

    return json.dumps({
        "gmap_name": gmap.get('name'),
        "gmap_name_status": gmap['comparison'],
        "gmap_type": gmap.get('type'),
        "distance": distance,
        "distance_status": distance_status,
        "gmap_comparison_url": gmap['gmap_comparison_url'],
    })
@app.route('/belarus-ootp_by_geocode', methods=['GET', 'POST'])
@login_required
def belarus_ootp_by_geocode():
    """Re-geocode a single ``belarus_oopt`` record via the Google Maps API.

    POST form fields: ``id`` (ObjectId), ``type`` ("autocomplete" for raw
    suggestions, anything else resolves ``place_id``), ``place_id``.
    Returns a JSON string with either the raw suggestions or a summary of
    the parsed place plus comparison flags against the stored record.
    """
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.location
    doc = db.belarus_oopt.find_one({"_id": ObjectId(request.form['id'])})
    doc_factory = DocFactory(config.get('mongodb'))

    # Obtain a usable Places API key; bail out hard when none is left.
    keyAPI = Keygen().get_key_place()
    if not keyAPI:
        sys.exit()
    config.set({'googlemaps': {'geocoding': {'key': keyAPI}}})
    gmap_config = config.get('googlemaps')

    language = 'ru'
    gmap_loader = LoaderFactory.loader_gmaps_with_cache(
        gmaps_config=gmap_config, storage_config=config.get('mongodb'))
    spider = Spider(
        loader_factory=LoaderFactory,
        gmap_parser=MapFactory.spain,
        doc_factory=doc_factory,
        language=language,
        config=config,
        use_cache=True
    )

    if request.form['type'] == "autocomplete":
        # Qualify the query with the selsoviet name when the record has one.
        if 'NAMESELSOVET' in doc:
            raw = gmap_loader.by_places(doc['NAMEOBJECT'] + ', ' + str(doc['NAMESELSOVET']))
        else:
            raw = gmap_loader.by_places(str(doc['NAMEOBJECT']))
        return json.dumps(raw)

    objects = spider.get_gmap_place_id(request.form['place_id'])
    gmap = objects[0].get_document()

    # Exact name comparison; any lookup failure counts as "names differ".
    try:
        gmap['comparison'] = gmap['name'] == doc['NAMEOBJECT']
    except Exception:
        gmap['comparison'] = False

    if 'lat' in doc:
        distance = getDistance(gmap['center']['lat'], gmap['center']['lng'],
                               doc["lat"], doc["lng"])
        gmap['gmap_comparison_url'] = (
            "https://www.google.com.ua/maps/dir/"
            + str(gmap['center']['lat']) + "," + str(gmap['center']['lng'])
            + "/" + str(doc["lat"]) + "," + str(doc["lng"]))
    else:
        # Sentinel: no reference coordinates for this record.
        distance = False
        gmap['gmap_comparison_url'] = False

    db.belarus_oopt.update_one(
        {"_id": ObjectId(request.form['id'])},
        {
            "$set": {
                'gmap_name': gmap.get('name'),
                'gmap_admin_hierarchy': gmap.get('admin_hierarchy', {}),
                'gmap_center': gmap.get('center'),
                'gmap_bounds': gmap.get('bounds'),
                'gmap_type': gmap.get('type'),
                'gmap_translate': gmap.get('translate'),
                'gmap_requests': gmap.get('requests'),
                'gmap_code': gmap.get('code'),
                'gmap_postal_code': gmap.get('postal_code'),
                'gmap_st_distance': distance
            }
        }
    )

    # BUGFIX: previously ``distance < 2`` was also True when distance was the
    # sentinel False (no coordinates), wrongly reporting a close match.
    distance_status = distance is not False and distance < 2

    return json.dumps({
        "gmap_name": gmap.get('name'),
        "gmap_name_status": gmap['comparison'],
        "gmap_type": gmap.get('type'),
        "distance": distance,
        "distance_status": distance_status,
        "gmap_comparison_url": gmap['gmap_comparison_url'],
    })
@app.route('/belarus-reparse_by_geocode', methods=['GET', 'POST'])
@login_required
def belarusreparse_by_geocode():
    """Re-geocode a single ``belarus`` record via the Google Maps API.

    POST form fields: ``id`` (ObjectId), ``type`` ("autocomplete" for raw
    suggestions, anything else resolves ``place_id``), ``place_id``.
    Unlike the sibling endpoints this one does not rotate API keys via
    Keygen — it relies on the key already present in the config file.
    Returns a JSON string with either the raw suggestions or a summary of
    the parsed place plus comparison flags against the stored record.
    """
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.location
    doc = db.belarus.find_one({"_id": ObjectId(request.form['id'])})
    doc_factory = DocFactory(config.get('mongodb'))
    gmap_config = config.get('googlemaps')

    language = 'ru'
    gmap_loader = LoaderFactory.loader_gmaps_with_cache(
        gmaps_config=gmap_config, storage_config=config.get('mongodb'))
    spider = Spider(
        loader_factory=LoaderFactory,
        gmap_parser=MapFactory.spain,
        doc_factory=doc_factory,
        language=language,
        config=config,
        use_cache=True
    )

    if request.form['type'] == "autocomplete":
        # Qualify the query with the selsoviet name when the record has one.
        if 'NAMESELSOVET' in doc:
            raw = gmap_loader.by_places(doc['NAMEOBJECT'] + ', ' + str(doc['NAMESELSOVET']))
        else:
            raw = gmap_loader.by_places(str(doc['NAMEOBJECT']))
        return json.dumps(raw)

    objects = spider.get_gmap_place_id(request.form['place_id'])
    gmap = objects[0].get_document()

    # Exact name comparison; any lookup failure counts as "names differ".
    try:
        gmap['comparison'] = gmap['name'] == doc['NAMEOBJECT']
    except Exception:
        gmap['comparison'] = False

    if 'lat' in doc:
        distance = getDistance(gmap['center']['lat'], gmap['center']['lng'],
                               doc["lat"], doc["lng"])
        gmap['gmap_comparison_url'] = (
            "https://www.google.com.ua/maps/dir/"
            + str(gmap['center']['lat']) + "," + str(gmap['center']['lng'])
            + "/" + str(doc["lat"]) + "," + str(doc["lng"]))
    else:
        # Sentinel: no reference coordinates for this record.
        distance = False
        gmap['gmap_comparison_url'] = False

    db.belarus.update_one(
        {"_id": ObjectId(request.form['id'])},
        {
            "$set": {
                'gmap_name': gmap.get('name'),
                'gmap_admin_hierarchy': gmap.get('admin_hierarchy', {}),
                'gmap_center': gmap.get('center'),
                'gmap_bounds': gmap.get('bounds'),
                'gmap_type': gmap.get('type'),
                'gmap_translate': gmap.get('translate'),
                'gmap_requests': gmap.get('requests'),
                'gmap_code': gmap.get('code'),
                'gmap_postal_code': gmap.get('postal_code'),
                'gmap_wiki_distance': distance
            }
        }
    )

    # BUGFIX: previously ``distance < 2`` was also True when distance was the
    # sentinel False (no coordinates), wrongly reporting a close match.
    # (An unused TIP-code lookup table was also removed — it was dead code.)
    distance_status = distance is not False and distance < 2

    return json.dumps({
        "gmap_name": gmap.get('name'),
        "gmap_name_status": gmap['comparison'],
        "gmap_type": gmap.get('type'),
        "distance": distance,
        "distance_status": distance_status,
        "gmap_comparison_url": gmap['gmap_comparison_url'],
    })
@app.route('/users/create', methods=['GET', 'POST'])
@login_required
def user_create():
    """Render the user-creation form; on POST, store the new user and redirect.

    The phone number doubles as the Mongo ``_id``; the password is stored as
    a PBKDF2-SHA256 hash.
    """
    config = Config('./config/config.yml')
    mongo_config = config.get('mongodb')
    connection = MongoClient(mongo_config['host'], mongo_config['port'])
    db = connection.local
    if request.method == 'POST':
        pass_hash = generate_password_hash(request.form['password'], method='pbkdf2:sha256')
        # insert_one replaces the deprecated Collection.insert
        # (removed in PyMongo 4).
        db.users.insert_one({
            "_id": request.form['phone'],
            "password": pass_hash,
            "userName": request.form['username'],
            "userEmail": request.form['email'],
            "phone": '+' + str(request.form['phone']),
        })
        return redirect(url_for('user_list'))
    return render_template('admin/users/create.html')
@app.route('/users/list')
@login_required
def user_list():
    """List every user stored in the local ``users`` collection."""
    mongo_conf = Config('./config/config.yml').get('mongodb')
    records = MongoClient(mongo_conf['host'], mongo_conf['port']).local.users.find()
    return render_template('admin/users/list.html', data=records)
@app.route('/gmap/keys')
@login_required
def keys_google():
    """Show all stored Google API keys from the local ``keygen`` collection."""
    mongo_conf = Config('./config/config.yml').get('mongodb')
    records = MongoClient(mongo_conf['host'], mongo_conf['port']).local.keygen.find()
    return render_template('admin/gmap/keys.html', data=records)
@app.route('/gmaps/')
@app.route('/gmaps/<string:country>')
@login_required
def gmaps_list(country=None):
    """Render the Google-Maps records list page, optionally scoped to a country."""
    return render_template('admin/gmap/list.html', country=country)
@app.route('/gmaps/unit/<string:id>')
@login_required
def gmaps_unit(id):
    """Show one cached Google-Maps document looked up by its Mongo ObjectId."""
    config = Config('./config/config.yml')
    api_key = config.get('googlemaps').get('geocoding').get('key')
    record = DocFactory(config.get('mongodb')).gmaps_collection().find_one(
        {'_id': ObjectId(id)})
    return render_template('admin/gmap/unit.html', data=record, api_key=api_key)
@app.route('/gmaps/unit/code/<string:id>')
@login_required
def gmap_code_unit(id):
    """Show one cached Google-Maps document looked up by its ``code`` field."""
    config = Config('./config/config.yml')
    api_key = config.get('googlemaps').get('geocoding').get('key')
    record = DocFactory(config.get('mongodb')).gmaps_collection().find_one({'code': id})
    return render_template('admin/gmap/unit.html', data=record, api_key=api_key)
@app.route('/wiki/')
@app.route('/wiki/<string:country>')
@login_required
def wiki_list(country=None):
    """Render the wiki records list page, optionally scoped to a country."""
    return render_template('admin/wiki/list.html', country=country)
@app.route('/wiki/unit/<string:id>')
@login_required
def wiki_unit(id):
    """Show one cached wiki document looked up by its Mongo ObjectId."""
    config = Config('./config/config.yml')
    api_key = config.get('googlemaps').get('geocoding').get('key')
    record = DocFactory(config.get('mongodb')).wiki_collection().find_one(
        {'_id': ObjectId(id)})
    return render_template('admin/wiki/unit.html', data=record, api_key=api_key)
@app.route('/wiki/unit/code/<string:id>')
@login_required
def wiki_code_unit(id):
    """Show one cached wiki document looked up by its ``code`` field."""
    config = Config('./config/config.yml')
    api_key = config.get('googlemaps').get('geocoding').get('key')
    record = DocFactory(config.get('mongodb')).wiki_collection().find_one({'code': id})
    return render_template('admin/wiki/unit.html', data=record, api_key=api_key)
@app.route('/tasks/<string:journal_id>')
@app.route('/tasks/<string:journal_id>/<string:status>')
@login_required
def tasks_list(journal_id, status='active'):
    """List tasks for one journal, filtered by status (active by default)."""
    config = Config('./config/config.yml')
    storage = JobStorage(job_name=journal_id, storage_config=config.get('mongodb'))
    # Dispatch on the requested status; anything unknown falls back to active.
    fetch_by_status = {
        storage.STATUS_COMPLETE: storage.get_complete,
        storage.STATUS_IN_PROGRESS: storage.get_in_progress,
    }
    tasks = fetch_by_status.get(status, storage.get_active)()
    return render_template('admin/tasks/list.html',
                           name=journal_id,
                           tasks=tasks
                           )
@app.route('/tasks/remove/<string:journal_id>')
@login_required
def clear_tasks(journal_id):
    """Drop every stored task for the given journal and confirm."""
    storage = JobStorage(job_name=journal_id,
                         storage_config=Config('./config/config.yml').get('mongodb'))
    storage.clear()
    return render_template('admin/empty.html', data='ok', auto_close=True)
@app.route('/logs/<string:name>/<int:status>')
@login_required
def logs(name, status=None):
    """Render log entries from ``log.<name>``, optionally filtered by status.

    Status 1 also matches documents that have no status field at all
    (legacy entries); any other truthy status matches exactly.
    """
    mongo_conf = Config('./config/config.yml').get('mongodb')
    collection = MongoClient(mongo_conf['host'], mongo_conf['port']).log[name]
    base = {'message': {'$exists': True}}
    if status == 1:
        query = {'$and': [base,
                          {'$or': [{'status': status}, {'status': {'$exists': False}}]}]}
    elif status:
        query = dict(base, status=status)
    else:
        query = base
    return render_template('admin/logs/list.html', logs=collection.find(query), name=name)
@app.route('/logs/remove/<string:name>')
@login_required
def clear_logs(name):
    """Delete every document in the ``log.<name>`` collection and confirm."""
    mongo_conf = Config('./config/config.yml').get('mongodb')
    MongoClient(mongo_conf['host'], mongo_conf['port']).log[name].delete_many({})
    return render_template('admin/empty.html', data='ok', auto_close=True)
@app.route('/logs/close/<string:name>/<string:id>')
@login_required
def log_close(name, id):
    """Mark one log entry as closed; report 'Ok' on success, 'Not' otherwise."""
    log = MongoDBLog(log_name=name,
                     config=Config('./config/config.yml').get('mongodb'))
    return 'Ok' if log.close(id) else 'Not'
@app.route('/clear/wiki/<string:name>')
@login_required
def clear_wiki_country(name):
    """Remove every wiki record whose top-level admin area matches *name*."""
    factory = DocFactory(Config('./config/config.yml').get('mongodb'))
    outcome = factory.wiki_collection().delete_many(
        {'admin_hierarchy.ADMIN_LEVEL_1.name': name})
    return render_template('admin/empty.html',
                           data=['ok', outcome.deleted_count], auto_close=True)
@app.route('/clear/gmaps/<string:name>')
@login_required
def clear_gmaps_country(name):
    """Remove every gmaps record whose top-level admin area matches *name*."""
    factory = DocFactory(Config('./config/config.yml').get('mongodb'))
    outcome = factory.gmaps_collection().delete_many(
        {'admin_hierarchy.ADMIN_LEVEL_1.name': name})
    return render_template('admin/empty.html',
                           data=['ok', outcome.deleted_count], auto_close=True)
@app.route('/test/wiki')
@login_required
def test_wiki():
    # Placeholder endpoint — not implemented.  Returning None makes Flask
    # raise if the route is hit, so it is effectively disabled.
    pass
@app.route('/test/gmap')
@login_required
def test_gmap():
    """Debug endpoint: exercise the Google-Maps loader from query-string args.

    Query args (all optional): ``method`` (address|position|place_id|
    address_type), ``lang``, ``use_cache``, plus the method-specific
    ``address``/``latitude``/``longitude``/``place_id``/``type`` values.
    Renders the raw API payload alongside the parsed result.
    """
    get = request.args.copy()
    raw = {}
    parsed = []
    if get:
        # NOTE(review): bool() of any non-empty query string is True, so
        # use_cache cannot actually be disabled via "?use_cache=0".
        use_cache = bool(get.get('use_cache', True))
        method = get.get('method', 'address')
        language = get.get('lang', 'en')
        config = Config('./config/config.yml')
        gmap_config = config.get('googlemaps')
        gmap_config.update(language=language)
        gmap_loader = LoaderFactory.loader_gmaps_with_cache(gmaps_config=gmap_config,
                                                            storage_config=config.get('mongodb'))
        spider = Spider(
            loader_factory=LoaderFactory,
            gmap_parser=MapFactory.france,
            language=language,
            config=config,
            use_cache=use_cache
        )
        if method == 'address':
            address = get.get('address', 'Magadan')
            raw = gmap_loader.by_address(address, use_cache=use_cache)
        elif method == 'position':
            lat = get.get('latitude', 59.558208)
            lng = get.get('longitude', 150.822794)
            raw = gmap_loader.by_position(lat=lat, lng=lng, use_cache=use_cache)
        elif method == 'place_id':
            place_id = get.get('place_id', 'ChIJ6UpSLYGEaVkROrwiAnFmzXw')
            raw = gmap_loader.by_place_id(place_id=place_id, use_cache=use_cache)
        elif method == 'address_type':
            address = get.get('address', 'Magadan')
            # NOTE(review): ``type`` shadows the builtin here; left unchanged.
            type = get.get('type', 'locality')
            raw = spider.gmap_loader.by_places(address=address, use_cache=spider.use_cache)
            parsed = spider.get_place_ids_by_address_for_type(address=address, type=type)
        if method in ['address', 'position', 'place_id']:
            # Parse the raw payload into dictionaries for the template.
            objects = MapFactory.france(raw)
            for element in objects:
                parsed.append(element.as_dictionary())
    return render_template('admin/gmap/test.html', form_data=get, raw=raw, parsed=parsed)
@app.route('/recursive/parsed_page/<string:name>')
@login_required
def recursive_parsed_page_cache(name):
    """List the cached parsed-page documents of one recursive crawl."""
    mongo_conf = Config('./config/config.yml').get('mongodb')
    items = MongoClient(mongo_conf['host'], mongo_conf['port']).parsed_page[name].find()
    return render_template('admin/recursive/list.html', items=items, name='Parsed page')
@app.route('/recursive/parsed_page/drop/<string:name>')
@login_required
def recursive_parsed_page_drop(name):
    """Drop the parsed-page cache collection of one recursive crawl."""
    mongo_conf = Config('./config/config.yml').get('mongodb')
    MongoClient(mongo_conf['host'], mongo_conf['port']).parsed_page[name].drop()
    return render_template('admin/empty.html', data='ok', auto_close=True)
@app.route('/recursive/url_pool/drop/<string:name>')
@login_required
def recursive_url_pool_drop(name):
    """Drop the url-pool collection of one recursive crawl."""
    mongo_conf = Config('./config/config.yml').get('mongodb')
    MongoClient(mongo_conf['host'], mongo_conf['port']).url_pool[name].drop()
    return render_template('admin/empty.html', data='ok', auto_close=True)
@app.route('/recursive/url_pool/<string:name>')
@login_required
def recursive_url_pool_cache(name):
    """List the queued URLs of one recursive crawl."""
    mongo_conf = Config('./config/config.yml').get('mongodb')
    items = MongoClient(mongo_conf['host'], mongo_conf['port']).url_pool[name].find({})
    return render_template('admin/recursive/list.html', items=items, name='Url pool')
@app.route('/recursive')
@login_required
def recursive_cache_list():
    """Overview page linking the parsed-page and url-pool cache collections."""
    mongo_conf = Config('./config/config.yml').get('mongodb')
    connection = MongoClient(mongo_conf['host'], mongo_conf['port'])
    parsed_page = connection.parsed_page
    url_pool = connection.url_pool
    # NOTE: collection_names() is deprecated in newer PyMongo in favour of
    # list_collection_names() — kept here for driver compatibility.
    return render_template(
        'admin/recursive/link_list.html',
        parsed_page=parsed_page,
        url_pool=url_pool,
        parsed_page_names=parsed_page.collection_names(False),
        url_pool_names=url_pool.collection_names(False),
    )
@app.route('/worth-it/<string:word>')
@login_required
def secret_page(word):
    """Render the hidden debug page only when *word* hashes to the known MD5."""
    digest = hashlib.md5(word.encode('utf-8')).hexdigest()
    if digest != 'e37d7c242913151ee9d7d794f2027128':
        return render_template('admin/empty.html', data=403)
    return render_template('admin/worth-it/debug.html')
|
987,897 | 1c458fb70895809517d093f4015df4f3b92f05bf | o = []
e = []
i = 0
user = int(input('Введите сколько вам лет: '))
# Partition 0..user-1 into the even list (o, created above) and the odd list (e).
while i < user:
    bucket = o if i % 2 == 0 else e
    bucket.append(i)
    i += 1
# Show the evens when the entered age is itself even, otherwise the odds.
if user % 2 == 0:
    print('четные числа: ', o)
else:
    print('нечетные числа: ', e)
987,898 | a6ccc227cd61aafda0ee3990b1c85a2c2d7389cc | import pickle
import copy
from TarockBasics import TarockBasics
from GameStateTarock import GameStateTarock
from MilestoneAgents import MilestoneAgents
from probamo import index
import matplotlib.pyplot as plt
from os import listdir
from os.path import isfile, join
# Shared module-level helpers: a throwaway game state (used only for its
# winning_card / eval_stack methods), the rules helper, and the identifiers
# of the trained models whose result files are indexed below.
gst = GameStateTarock([], [], [], [1,2,3], 1, [2,3])
tb = TarockBasics()
MODELS = ["1_1", "2_2", "3_3", "5_5"]
def get_files(mypath):
    """Return the names of the regular files directly inside *mypath*."""
    entries = listdir(mypath)
    return [name for name in entries if isfile(join(mypath, name))]
def get_playing_order(starting_player):
    """Return the seat order (first, second, third) for players 1..3,
    rotating clockwise from *starting_player*."""
    seats = [1, 2, 3]
    start = seats.index(starting_player)
    return (starting_player,
            seats[(start + 1) % 3],
            seats[(start + 2) % 3])
def index_results():
    """Load saved game results and bucket the first 50 games of each type.

    Returns a nested dict: model id -> game name ("Solo brez" / "Naprej")
    -> hand-quality class ("0"/"1"/"2") -> list of reconstructed game
    records ({"player hands", "discard", "points", "duo"}).

    Fixes over the previous version: the local result dict no longer
    shadows the imported name ``index``, pickle files are closed via a
    context manager, and the two duplicated per-game branches are merged.
    """
    results = {model: {"Solo brez": {"0": [], "1": [], "2": []},
                       "Naprej": {"0": [], "1": [], "2": []}}
               for model in MODELS}
    for model in MODELS:
        # Cap at 50 games per game type, matching the original behaviour.
        counts = {"Naprej": 0, "Solo brez": 0}
        base = "siciljasti_tarokist/Results/" + model + "/"
        for filename in get_files(base):
            # NOTE(review): pickle.load is only safe because these result
            # files are produced locally — never feed it untrusted input.
            with open(base + filename, "rb") as file_obj:
                msa, game, starting_player = pickle.load(file_obj)
            if game in counts and counts[game] < 50:
                player_hands, player_points = reconstruct_hands(msa.discard, msa.duo)
                current_game = {"player hands": player_hands,
                                "discard": msa.discard,
                                "points": player_points,
                                "duo": msa.duo}
                cq = good_cards(player_hands[1])
                results[model][game][str(cq)].append(current_game)
                counts[game] += 1
    return results
def reconstruct_hands(discard, duo):
    """Rebuild each player's dealt hand and final point total from the trick
    history of a finished game.

    The player not listed in *duo* (the solo player) leads the first trick;
    discard[0] is skipped (not a trick), and every later entry is one
    three-card trick laid in playing order.
    """
    leader = ({1, 2, 3} - set(duo)).pop()
    order = get_playing_order(leader)
    hands = {1: [], 2: [], 3: []}
    points = {1: 0, 2: 0, 3: 0}
    for trick in discard[1:]:
        # Cards were played in `order`, so the i-th card belongs to order[i].
        for player, card in zip(order, trick):
            hands[player].append(card)
        # The trick winner collects its points and leads the next trick.
        winner = order[gst.winning_card(trick)]
        points[winner] += gst.eval_stack(trick)
        order = get_playing_order(winner)
    return hands, points
def good_cards(hand):
    """Rate the quality of *hand*: 0 = weak, 1 = good, 2 = great.

    The rating is based on how many kings (cards 8, 18, 28, 38) and how
    many tarocks (cards >= 40) the hand contains.
    """
    kings = sum(1 for card in hand if card in (8, 18, 28, 38))
    tarocks = sum(1 for card in hand if card >= 40)
    if kings < 2:
        return 0
    if kings == 2:
        # Two kings only count as "good" with substantial tarock backing.
        return 1 if tarocks >= 6 else 0
    # Three or more kings: "great" needs 8+ tarocks, "good" needs 5+.
    if tarocks >= 8:
        return 2
    return 1 if tarocks >= 5 else 0
def avg_points(games, player=1, solo=None):
    """Return *player*'s average points per game over *games*.

    Each game is a dict with at least "points" ({player: points}) and
    "duo" (the two players playing together), e.g.:
    {'points': {1: 50, 2: 6, 3: 3}, 'duo': [1, 3], ...}

    solo=None counts every game, solo=True only games where *player*
    played solo (not in the duo), solo=False only games where *player*
    was in the duo.  Returns 0 when no game matches.

    Raises NotImplementedError for any other *solo* value.
    """
    # `is None` instead of `== None` (idiom fix); the True/False comparisons
    # stay as `==` on purpose so truthy values like 1 keep being accepted.
    if solo is None:
        scores = [game["points"][player] for game in games]
    elif solo == True:  # noqa: E712 - deliberate equality, see above
        scores = [game["points"][player] for game in games
                  if player not in game["duo"]]
    elif solo == False:  # noqa: E712
        scores = [game["points"][player] for game in games
                  if player in game["duo"]]
    else:
        raise NotImplementedError
    return sum(scores) / len(scores) if scores else 0
def overall_averages(index):
    """Show a two-panel bar chart of each model's average points per game,
    pooled over all card-quality buckets: duo play ("Naprej") on the left,
    solo play ("Solo brez") on the right."""
    duo_means = []
    solo_means = []
    for model in MODELS:
        pooled_duo = (index[model]["Naprej"]["0"]
                      + index[model]["Naprej"]["1"]
                      + index[model]["Naprej"]["2"])
        duo_means.append((model, avg_points(pooled_duo)))
        pooled_solo = (index[model]["Solo brez"]["0"]
                       + index[model]["Solo brez"]["1"]
                       + index[model]["Solo brez"]["2"])
        solo_means.append((model, avg_points(pooled_solo)))
    fig, axs = plt.subplots(1,2)
    fig.suptitle("Best performing models playing with Silicijasti tarokist")
    fig.text(0.5, 0.02, 'Models', ha='center')
    fig.text(0.08, 0.5, 'Average points per game', va='center', rotation='vertical')
    for axis, title, means in ((axs[0], "Playing duo", duo_means),
                               (axs[1], "Playing solo", solo_means)):
        # Sort ascending by average so the bars grow left to right.
        ordered = sorted(means, key=lambda pair: pair[1])
        names = [name for name, _ in ordered]
        values = [value for _, value in ordered]
        axis.bar(names, values)
        axis.title.set_text(title)
        axis.set_xticklabels(names, rotation = -45)
    plt.show()
def overall_averages_with_cq(index):
    """Show a two-panel bar chart of each model's average points per game,
    split by card quality (0 = any, 1 = good, 2 = great; see good_cards):
    duo play ("Naprej") on the left, solo play ("Solo brez") on the right.

    Fixes two copy-paste defects of the duplicated original: the duo
    "Good cards" series plotted the great-cards values instead of its own,
    and the quality-1 and quality-2 label suffixes were identical, making
    their bars share the same categorical x positions.
    """
    quality_labels = {"0": "Any cards", "1": "Good cards", "2": "Great cards"}
    fig, axs = plt.subplots(1,2)
    fig.suptitle("Best performing models playing with Silicijasti tarokist with respect to quality of cards")
    fig.text(0.5, 0.02, 'Models', ha='center')
    fig.text(0.08, 0.5, 'Average points per game', va='center', rotation='vertical')
    for axis, title, game in ((axs[0], "Playing duo", "Naprej"),
                              (axs[1], "Playing solo", "Solo brez")):
        all_names = []
        for quality in ("0", "1", "2"):
            means = [(model, avg_points(index[model][game][quality]))
                     for model in MODELS]
            # Sort ascending by average so the bars grow left to right.
            ordered = sorted(means, key=lambda pair: pair[1])
            # Pad names with `quality` trailing spaces so each bucket gets
            # its own distinct categorical x positions on the shared axis.
            names = [name + " " * int(quality) for name, _ in ordered]
            values = [value for _, value in ordered]
            axis.bar(names, values, label=quality_labels[quality])
            all_names += names
        axis.title.set_text(title)
        axis.set_xticklabels(all_names, rotation = -45)
    plt.legend(loc="upper right")
    plt.show()
if __name__ == "__main__":
    # Build the bucketed results once, then render both summary charts.
    game_index = index_results()
    overall_averages(game_index)
    overall_averages_with_cq(game_index)
from openpyxl import Workbook
from openpyxl import load_workbook
def write_to_excel(data):
    """Write each URL in *data* to column A of a fresh 'urls.xlsx'.

    Uses the idiomatic ``ws.append`` instead of zipping *data* against
    ``iter_rows``, which is simpler and does not depend on the sheet's
    pre-existing dimensions.
    """
    wb = Workbook()
    ws = wb.active
    for url in data:
        ws.append([url])  # one URL per row, column A
    wb.save('urls.xlsx')
    print('--- done writing to excel ---')
def append_to_excel(data):
    """Append each URL in *data* as a new row at the end of 'urls.xlsx'.

    Bug fix: the original started writing at ``min_row=ws.max_row``, which
    overwrote the last already-existing row.  ``ws.append`` always writes
    after the current last row, so no manual row bookkeeping is needed.
    """
    wb = load_workbook('urls.xlsx')
    ws = wb.active
    for url in data:
        ws.append([url])  # one URL per row, column A, after existing rows
    wb.save('urls.xlsx')
    print('--- done appending to excel ---')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.