# beamr/lexers/document.py
from ply import lex
from beamr.lexers.generic import t_error # Used internally by lex() @UnusedImport
import beamr.interpreters
import beamr.debug as dbg
tokens = ('COMMENT', 'RAW', 'HEADING', 'SLIDE', 'SCISSOR', 'MACRO', 'YAML', 'TEXT')
def t_COMMENT(t):
r'#.*(?=(\n|$))'
t.value = beamr.interpreters.Comment(t.value, **_argLineno(t.lexer, t.value))
return t
def t_RAW(t):
r'\n(?P<RAW_INDENT> *)&{(?P<RAW_TXT>[\s\S]+?)\n(?P=RAW_INDENT)}'
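    # (?P=RAW_INDENT) is a backreference: the closing '}' must sit at the same
    # indentation as the opening '&{' for the raw block to end.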
gd = t.lexer.lexmatch.groupdict()
t.value = beamr.interpreters.Text(gd['RAW_TXT'] + '\n\n', **_argLineno(t.lexer, t.value))
return t
def t_HEADING(t):
r'\n.+\n[_~=-]{4,}(?=\n)'
t.value = beamr.interpreters.Heading(t.value, **_argLineno(t.lexer, t.value))
return t
def t_SLIDE(t):
r'\n\[(?P<SLD_PLAIN>\/)?(?P<SLD_ALIGN>[_^])?(?P<SLD_OPTS>\S*) ?(?P<SLD_TITLE>.*)(\n~(?P<SLD_BG>[^\n|]*)(?P<SLD_BGUP>\|)?)?(?P<SLD_CONTENT>[\s\S]*?)\n\]'
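    # Named groups: SLD_PLAIN ('/'), SLD_ALIGN ('_' or '^'), SLD_OPTS,
    # SLD_TITLE, an optional '~'-prefixed background line (SLD_BG, with
    # SLD_BGUP set when it ends in '|'), and the slide body (SLD_CONTENT).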
gd = t.lexer.lexmatch.groupdict()
t.value = beamr.interpreters.Slide(
title=gd['SLD_TITLE'],
opts=gd['SLD_OPTS'],
plain=gd['SLD_PLAIN'],
align=gd['SLD_ALIGN'],
bg=gd['SLD_BG'],
bgUp=gd['SLD_BGUP'],
content=gd['SLD_CONTENT'],
**_argLineno(t.lexer, t.value))
return t
def t_SCISSOR(t):
r'(8<|>8){.+?}'
t.value = beamr.interpreters.ScissorEnv(t.value[3:-1], **_argLineno(t.lexer, t.value))
return t
def t_MACRO(t):
r'%{[\s\S]+?}'
t.value = beamr.interpreters.Macro(txt = t.value[2:-1], **_argLineno(t.lexer, t.value))
return t
def t_YAML(t):
r'\n---(?=\n)[\s\S]*?(\n\.\.\.|$)'
t.value = beamr.interpreters.Config(t.value, **_argLineno(t.lexer, t.value))
return t
def t_TEXT(t):
r'[\s\S]+?(?=(\n|\[|#|$|>|8|&|%))'
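    # Lazily consume at least one character, stopping (via lookahead) before
    # any character that could begin another token.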
t.lexer.lineno += t.value.count('\n')
return t
lexer = lex.lex(debug=dbg.verbose, reflags=0)
def _argLineno(lexer, text):
    '''
    Return a dict containing the lexer in use, the current line number, and
    the line number after the given text has been consumed (useful for
    passing to interpreter constructors).
    :param lexer: Current lexer instance in use
    :param text: Text being evaluated
    '''
lineno = lexer.lineno
nextlineno = lineno + text.count('\n')
return {'lexer': lexer,
'lineno': lineno,
            'nextlineno': nextlineno}

# code/kjk.py
import random, math
from itertools import *
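# The secret is a pair (f, g): f maps each challenge character to a digit and
# g is a permutation of '0123456789' (with g[0] == '0').  A response is built
# by chaining digits: b[0] = g[(f(c[0]) + f(c[-1])) % 10] and, for i >= 1,
# b[i] = g[(b[i-1] + f(c[i])) % 10].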
def respond(challenge, f, g):
n = len(challenge)
a = [f[challenge[i]] for i in range(0,n)]
b = [0 for i in range(0,n)]
b[0]=int(g[(a[0]+a[-1]) % 10])
for i in range(1,n):
b[i] = int(g[(b[i-1]+a[i]) % 10])
return b
def checkg(g, pairs):
f = {}
for pair in pairs:
n = len(pair[0])
for i in range(1,n):
if pair[0][i] not in f:
f[pair[0][i]]=(int(g[g.index(pair[1][i])-1])-int(pair[1][i-1])) % 10
elif f[pair[0][i]]!= ((int(g[g.index(pair[1][i])-1])-int(pair[1][i-1])) % 10):
return False,f
if pair[0][0] not in f:
f[pair[0][0]]=(int(g[g.index(pair[1][0])-1])-f[pair[0][n-1]]) % 10
elif f[pair[0][0]]!=((int(g[g.index(pair[1][0])-1])-f[pair[0][n-1]]) % 10):
return False,f
return True,f
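# Brute-force g: try all 9! digit permutations with g[0] fixed to '0' and keep
# any (f, g) pair consistent with every observed challenge/response pair.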
def crackfg(pairs):
templist = permutations('123456789')
    numperm = 0
    f, g = {}, ''  # ensure f and g are bound even when no permutation survives
for gtemp in templist:
g1='0'+''.join(gtemp)
[check, f1] = checkg(g1,pairs)
if check:
f = f1
g = g1
numperm +=1
print('#surviving permutations = ', numperm)
return numperm, f, g
def passwordgame():
pairs=[]
more = 'Y'
f={}
g=''
numperm=0
sofar = ''
print('Please use only lower case letters for challenges and digits for responses.')
while more == 'Y' or more == 'y':
challenge = input('Next Challenge? ')
if numperm==1 and not list(filter(lambda x: x not in sofar, challenge)):
print('Response = ', ''.join((str(x) for x in respond(challenge,f,g))))
else:
sofar = sofar+challenge
response = input('Response? ')
pairs.append([challenge, response])
[numperm, f, g] = crackfg(pairs)
print('A feasible character-to-digit map: ',f)
print('A feasible digit permutation: ', g)
if numperm > 0:
more = input('play again? (Y/N)')
else: more = 'N'
passwordgame()

# main.py
import discord
import asyncio
import stripe
from config import *
from datetime import date, timedelta
from discord.ui import Button, View
from discord.ext import commands
from discord.utils import get
# Date used for general embed information
today = date.today()
# Enable Stripe system (if enabled)
if Config.stripe['enabled']:
stripe.api_key = Config.stripe['key']
# Create bot event
intents = discord.Intents.default()
intents.members = True
intents.message_content = True
# Create class for starting bot to allow for persistent views
class StartBot(commands.Bot):
def __init__(self):
super().__init__(command_prefix=commands.when_mentioned_or(Config.prefix), intents=intents)
self.persistent_views_added = False
async def on_ready(self):
if not self.persistent_views_added:
self.add_view(PersistentVerification())
self.add_view(DonationView())
self.persistent_views_added = True
print(f"We have logged in as {bot.user}")
if Config.bot_status['enabled']:
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=Config.bot_status['message']), status=Config.bot_status['status'])
class PersistentVerification(discord.ui.View):
def __init__(self):
super().__init__(timeout=None)
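        # timeout=None together with the fixed custom_id on the button below
        # is what allows this view to be re-registered after a restart.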
@discord.ui.button(custom_id='persistent_view:verification', label='Verify', style=discord.ButtonStyle.gray)
async def verification(self, button: discord.ui.Button, interaction: discord.Interaction):
guild = bot.get_guild(Config.guild)
unverified = guild.get_role(Config.verification_system['unverifiedRole'])
verified = guild.get_role(Config.verification_system['verifiedRole'])
member = guild.get_member(interaction.user.id)
if verified is None or unverified is None:
return
else:
await member.remove_roles(unverified)
await member.add_roles(verified)
await interaction.response.send_message("You have verified yourself. Welcome to the server!", ephemeral=True)
if Config.stripe['enabled']:
class DonationDropdown(discord.ui.Select):
def __init__(self):
options = [
discord.SelectOption(label="5", description="$5"),
discord.SelectOption(label="10", description="$10"),
discord.SelectOption(label="15", description="$15"),
discord.SelectOption(label="20", description="$20"),
discord.SelectOption(label="25", description="$25"),
]
super().__init__(
custom_id="persistent_view:donation",
placeholder="Select Your Donation Amount",
min_values=1,
max_values=1,
options=options,
)
async def callback(self, interaction: discord.Interaction):
amount = int(self.values[0])
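            # Stripe amounts are denominated in the smallest currency unit
            # (cents), hence amount * 100 below.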
customer = stripe.Customer.create(name=interaction.user.display_name, description = f"{interaction.user.display_name}#{interaction.user.discriminator}")
product = stripe.InvoiceItem.create(currency = 'usd', customer = customer.id, amount = (amount * 100))
invoice = stripe.Invoice.create(customer = customer.id)
stripe.Invoice.finalize_invoice(invoice.id)
redirect = stripe.Invoice.retrieve(invoice.id)
embed = await embed_builder("Donation Page Created", f"Your donation has been setup. Please click the button below to complete your donation for the amount of ${amount}.")
view = await createLinkButton("Finalize Donation", f"{redirect.hosted_invoice_url}")
await interaction.user.send(embed=embed, view=view)
await interaction.response.send_message(f"Check your DMs to complete your donation of ${self.values[0]}", ephemeral=True)
class DonationView(discord.ui.View):
def __init__(self):
super().__init__(timeout=None)
# Adds the dropdown to our view object.
self.add_item(DonationDropdown())
bot = StartBot()
@bot.event
async def on_member_join(member):
# If welcome_channel module is enabled
if Config.welcome_channel['enabled']:
date = member.created_at
embed = await embed_builder("Welcome User!", f"<@{member.id}> ({member.display_name}#{member.discriminator}) has joined the server.\n\n**Account Age**:\n{date.strftime('%x')}, {date.strftime('%X')}", thumbnail = True, footer = True)
welcome_channel = bot.get_channel(Config.welcome_channel['channelID'])
await welcome_channel.send(embed=embed)
    # If welcome_channel module doesn't have a default role and the verification_system module is enabled
if not Config.welcome_channel['defaultRole'] and Config.verification_system['enabled'] == True:
# Add unverifiedRole
role = discord.utils.get(bot.get_guild(member.guild.id).roles, id=Config.verification_system['unverifiedRole'])
await member.add_roles(role)
# Send private message to user to verify themselves
embed = await embed_builder("Verify Yourself", f"To get access to the server, you need to verify yourself. Please click the button below to continue.", thumbnail = True, footer = True)
await member.send(embed=embed, view=PersistentVerification())
# If welcome_channel module does have a default role or the verification_system module is disabled
else:
role = discord.utils.get(bot.get_guild(member.guild.id).roles, id=Config.welcome_channel['defaultRole'])
await member.add_roles(role)
@bot.event
async def on_message(message):
if len(message.content) > 0:
if str(message.content[0]) != str(Config.prefix):
bad_word = any(word.lower() in message.content.lower() for word in Config.filtered)
if bad_word and not message.author.get_role(Config.admin_role):
await message.delete()
else:
await bot.process_commands(message)
@bot.command(name="embed", alias="sayem")
async def embed(ctx, *, embed = ""):
if ctx.author.get_role(Config.admin_role):
await ctx.message.delete()
if not embed:
await show_temporary_message(ctx, "Empty Embed", "No embed was provided. Please try again.")
else:
embed = await embed_builder("", embed, member = ctx.author)
await ctx.send(embed=embed)
else:
await permission_denied(ctx)
@bot.command(name="timeout", alias="mute")
async def timeout(ctx, member: discord.Member, time = "", *, reason = ""):
if ctx.author.get_role(Config.admin_role):
await ctx.message.delete()
if not time:
await show_temporary_message(ctx, "No Time", "No time was set for the timeout. Please try again.")
else:
            unit = ''.join(c for c in time if not c.isdigit())
            # Join the digits before converting so multi-digit values such as
            # "15m" parse as 15 rather than 1.
            value = int(''.join(c for c in time if c.isdigit()))
            # Compute the deadline at call time (timezone-aware, as required
            # by Member.timeout) rather than from the bot's start-up time.
            now = discord.utils.utcnow()
            match unit.lower():
                case 'h':
                    duration = now + timedelta(hours=value)
                case _:
                    duration = now + timedelta(minutes=value)
await member.timeout(duration, reason=reason)
await show_temporary_message(ctx, "User Timed out", f"{member.mention} was timed out.")
else:
await permission_denied(ctx)
@bot.command(name="kick", alias="remove")
async def kick(ctx, member: discord.Member, *, reason = ""):
if ctx.author.get_role(Config.admin_role):
await ctx.message.delete()
await member.kick(reason=reason)
await show_temporary_message(ctx, "User Kicked", f"{member.mention} was kicked.")
else:
await permission_denied(ctx)
@bot.command(name="ban", alias="perm")
async def ban(ctx, member: discord.Member, *, reason = ""):
if ctx.author.get_role(Config.admin_role):
await ctx.message.delete()
await member.ban(reason=reason)
await show_temporary_message(ctx, "User Banned", f"{member.mention} was banned.")
else:
await permission_denied(ctx)
@bot.command(name="purge", alias="clear")
async def clear(ctx, amount: int):
if ctx.author.get_role(Config.admin_role):
await ctx.message.delete()
await ctx.channel.purge(limit=amount)
await show_temporary_message(ctx, "Channel Cleared", f"The last **{amount}** messages in this channel have been removed.")
else:
await permission_denied(ctx)
if Config.stripe['enabled']:
@bot.command()
async def createDonation(ctx, channel: discord.TextChannel, *, body = ""):
if ctx.author.get_role(Config.admin_role):
await ctx.message.delete()
if not body:
await show_temporary_message(ctx, "No Body Provided", "No body was provided for the creation of the embed. Please try again.")
else:
embed = await embed_builder(f"{Config.name} Donations", body)
await channel.send(embed=embed, view=DonationView())
else:
await permission_denied(ctx)
# Function to show permission denied
async def permission_denied(ctx):
await ctx.message.delete()
embed = await embed_builder("Permission Denied", "You're not allowed to perform this action.")
message = await ctx.send(embed=embed)
    await asyncio.sleep(5)  # non-blocking pause so the event loop keeps running
await message.delete()
# Function to show temporary message
async def show_temporary_message(ctx, title, content):
embed = await embed_builder(title, content, footer = False)
message = await ctx.send(embed=embed)
    await asyncio.sleep(10)  # non-blocking pause so the event loop keeps running
await message.delete()
# Function for building embeds
async def embed_builder(title, description, member = "", thumbnail = False, footer = True):
embed = discord.Embed(title=title, description=description, color=discord.Color.from_rgb(18,95,217))
if footer:
embed.set_footer(text=f"© {Config.name} {today.strftime('%Y')} • {today.strftime('%m/%d/%Y')}")
if member:
        # display_avatar falls back to the default avatar when none is set
        embed.set_author(name=f"{member.display_name}#{member.discriminator}", icon_url=member.display_avatar.url)
if thumbnail:
embed.set_thumbnail(url=f"{Config.logo}")
return embed
# Function for asking questions (might be removed in future versions)
async def askQuestion(ctx, channel, question):
def check(m):
return m.author.id == ctx.author.id and m.channel.id == ctx.channel.id
embed = await embed_builder("", question, False, False)
msg = await channel.send(embed=embed)
response = await bot.wait_for(event = 'message', check = check, timeout = 60.0)
await response.delete()
await msg.delete()
return response
# Function to create a View with a link button
async def createLinkButton(label, url):
view = View()
button = Button(label=label, style=discord.ButtonStyle.link, url=url)
view.add_item(button)
return view
bot.run(Config.token)

# tests/dataset/test_annotation.py
from pathlib import Path
import pandas as pd
import pytest
from ertk.dataset.annotation import read_annotations, write_annotations
from .constants import test_data_dir
def test_read_annotations_str_1() -> None:
annotations = read_annotations(test_data_dir / "annot2_str.csv", dtype=str)
assert type(annotations["extra1"]) is str
assert annotations["extra1"] == "1"
assert annotations["extra2"] == "0"
assert annotations["1002_DFA_DIS_XX"] == "1"
assert annotations["1002_DFA_HAP_XX"] == "3"
def test_read_annotations_str_2() -> None:
annotations = read_annotations(test_data_dir / "label.csv", dtype=str)
assert type(annotations["extra1"]) is str
assert annotations["extra1"] == "none"
assert annotations["extra2"] == "sadness"
assert annotations["1002_DFA_DIS_XX"] == "disgust"
assert annotations["1002_DFA_HAP_XX"] == "happiness"
def test_read_annotations_float() -> None:
annotations = read_annotations(test_data_dir / "annot1.csv", dtype=float)
assert type(annotations["extra1"]) is float
assert annotations["extra1"] == 0
assert annotations["extra2"] == 1
assert annotations["1001_DFA_HAP_XX"] == pytest.approx(3.8472069)
assert annotations["1001_DFA_NEU_XX"] == pytest.approx(6.9999)
def test_write_annotations_str(tmp_path: Path) -> None:
mapping = {"clip1": "x", "clip2": "y", "clip3": "z"}
out_path = tmp_path / "annotaton.csv"
write_annotations(mapping, "test_annot", out_path)
df = pd.read_csv(out_path, header=0, index_col=0, dtype=str)
assert list(df.index) == ["clip1", "clip2", "clip3"]
assert df.columns[0] == "test_annot"
assert df.loc["clip1", "test_annot"] == "x"
def test_write_annotations_float(tmp_path: Path) -> None:
mapping = {"clip1": 1.5, "clip2": 2, "clip3": 0.9}
out_path = tmp_path / "annotaton_float.csv"
write_annotations(mapping, "test_annot2", out_path)
df = pd.read_csv(out_path, header=0, index_col=0, dtype={0: str, 1: float})
assert list(df.index) == ["clip1", "clip2", "clip3"]
assert df.columns[0] == "test_annot2"
assert df.loc["clip2", "test_annot2"] == 2
assert df.loc["clip3", "test_annot2"] == pytest.approx(0.9) | tests/dataset/test_annotation.py | from pathlib import Path
import pandas as pd
import pytest
from ertk.dataset.annotation import read_annotations, write_annotations
from .constants import test_data_dir
def test_read_annotations_str_1() -> None:
annotations = read_annotations(test_data_dir / "annot2_str.csv", dtype=str)
assert type(annotations["extra1"]) is str
assert annotations["extra1"] == "1"
assert annotations["extra2"] == "0"
assert annotations["1002_DFA_DIS_XX"] == "1"
assert annotations["1002_DFA_HAP_XX"] == "3"
def test_read_annotations_str_2() -> None:
annotations = read_annotations(test_data_dir / "label.csv", dtype=str)
assert type(annotations["extra1"]) is str
assert annotations["extra1"] == "none"
assert annotations["extra2"] == "sadness"
assert annotations["1002_DFA_DIS_XX"] == "disgust"
assert annotations["1002_DFA_HAP_XX"] == "happiness"
def test_read_annotations_float() -> None:
annotations = read_annotations(test_data_dir / "annot1.csv", dtype=float)
assert type(annotations["extra1"]) is float
assert annotations["extra1"] == 0
assert annotations["extra2"] == 1
assert annotations["1001_DFA_HAP_XX"] == pytest.approx(3.8472069)
assert annotations["1001_DFA_NEU_XX"] == pytest.approx(6.9999)
def test_write_annotations_str(tmp_path: Path) -> None:
mapping = {"clip1": "x", "clip2": "y", "clip3": "z"}
out_path = tmp_path / "annotaton.csv"
write_annotations(mapping, "test_annot", out_path)
df = pd.read_csv(out_path, header=0, index_col=0, dtype=str)
assert list(df.index) == ["clip1", "clip2", "clip3"]
assert df.columns[0] == "test_annot"
assert df.loc["clip1", "test_annot"] == "x"
def test_write_annotations_float(tmp_path: Path) -> None:
mapping = {"clip1": 1.5, "clip2": 2, "clip3": 0.9}
out_path = tmp_path / "annotaton_float.csv"
write_annotations(mapping, "test_annot2", out_path)
df = pd.read_csv(out_path, header=0, index_col=0, dtype={0: str, 1: float})
assert list(df.index) == ["clip1", "clip2", "clip3"]
assert df.columns[0] == "test_annot2"
assert df.loc["clip2", "test_annot2"] == 2
assert df.loc["clip3", "test_annot2"] == pytest.approx(0.9) | 0.719285 | 0.51312 |
import os
import pipfile
from dep_appearances.dependency import Dependency
from dep_appearances.import_statement import ImportStatement
class AppearancesReport:
def __init__(self, project_root):
self.project_root = os.path.abspath(project_root)
self.dependencies = []
def compile(self):
self.dependencies = self._dependencies_with_imports()
return self
def unused_dependencies(self):
unused_deps = [dep for dep in self.dependencies if dep.unused()]
return sorted(unused_deps, key=lambda dep: dep.name)
def underused_dependencies(self, usage_threshold):
deps = [dep for dep in self.dependencies if dep.underused(usage_threshold=usage_threshold)]
return sorted(deps, key=lambda dep: dep.name)
def _dependencies_with_imports(self):
dependencies = self._extract_dependencies()
import_statements = self._extract_import_statements()
for dep in dependencies:
for import_statement in import_statements:
if dep.imported_by(import_statement):
dep.add_import_statement(import_statement)
return dependencies
def _extract_dependencies(self):
dependencies = []
pfile = pipfile.load(os.path.join(self.project_root, "Pipfile"))
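        # pipfile exposes the [packages] section as "default" and
        # [dev-packages] as "develop".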
for package in pfile.data["default"].keys():
dependencies.append(package)
for package in pfile.data["develop"].keys():
dependencies.append(package)
return [Dependency(dependency) for dependency in dependencies]
def _extract_import_statements(self):
import_statements = []
for root, _dirs, files in os.walk(self.project_root):
if root.startswith(os.path.abspath(f"{self.project_root}/.venv")):
continue
for file in files:
if os.path.splitext(file)[1].lower() == ".py":
import_statements += self._extract_imports_from_py(os.path.join(root, file))
return import_statements
def _extract_imports_from_py(self, file):
imports = []
with open(file) as f:
line_number = 0
for line in f:
line_number += 1
if ImportStatement.test(line):
import_statement = ImportStatement(
source_file=file,
source_code=line,
line_number=line_number
)
imports.append(import_statement)
        return imports
__all__ = (
'MalformedNetworkData',
'ServerMessage',
'read_demo_file',
'clear_cache',
)
import dataclasses
import enum
import functools
import inspect
import math
import os
import struct
class MalformedNetworkData(Exception):
pass
def _read(f, n):
s = f.read(n)
if len(s) != n:
raise MalformedNetworkData
return s
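# RMQ (protocol 999) feature flags; ServerMessageServerInfo only reads these
# when the announced protocol version is RMQ.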
class ProtocolFlags(enum.IntFlag):
SHORTANGLE = (1 << 1)
FLOATANGLE = (1 << 2)
_24BITCOORD = (1 << 3)
FLOATCOORD = (1 << 4)
EDICTSCALE = (1 << 5)
ALPHASANITY = (1 << 6)
INT32COORD = (1 << 7)
MOREFLAGS = (1 << 31)
class ProtocolVersion(enum.IntEnum):
NETQUAKE = 15
FITZQUAKE = 666
RMQ = 999
@dataclasses.dataclass
class Protocol:
version: ProtocolVersion
flags: ProtocolFlags
class TempEntityTypes(enum.IntEnum):
SPIKE = 0
SUPERSPIKE = 1
GUNSHOT = 2
EXPLOSION = 3
TAREXPLOSION = 4
LIGHTNING1 = 5
LIGHTNING2 = 6
WIZSPIKE = 7
KNIGHTSPIKE = 8
LIGHTNING3 = 9
LAVASPLASH = 10
TELEPORT = 11
EXPLOSION2 = 12
BEAM = 13
class ServerMessageType(enum.Enum):
BAD = 0
NOP = 1
DISCONNECT = 2
UPDATESTAT = 3
VERSION = 4
SETVIEW = 5
SOUND = 6
TIME = 7
PRINT = 8
STUFFTEXT = 9
SETANGLE = 10
SERVERINFO = 11
LIGHTSTYLE = 12
UPDATENAME = 13
UPDATEFRAGS = 14
CLIENTDATA = 15
STOPSOUND = 16
UPDATECOLORS = 17
PARTICLE = 18
DAMAGE = 19
SPAWNSTATIC = 20
SPAWNBINARY = 21
SPAWNBASELINE = 22
TEMP_ENTITY = 23
SETPAUSE = 24
SIGNONNUM = 25
CENTERPRINT = 26
KILLEDMONSTER = 27
FOUNDSECRET = 28
SPAWNSTATICSOUND = 29
INTERMISSION = 30
FINALE = 31
CDTRACK = 32
SELLSCREEN = 33
CUTSCENE = 34
UPDATE = 128
# protocol 666 message types
SKYBOX = 37
BF = 40
FOG = 41
SPAWNBASELINE2 = 42
SPAWNSTATIC2 = 43
SPAWNSTATICSOUND2 = 44
class ItemFlags(enum.IntFlag):
SHOTGUN = 1
SUPER_SHOTGUN = 2
NAILGUN = 4
SUPER_NAILGUN = 8
GRENADE_LAUNCHER = 16
ROCKET_LAUNCHER = 32
LIGHTNING = 64
SUPER_LIGHTNING = 128
SHELLS = 256
NAILS = 512
ROCKETS = 1024
CELLS = 2048
AXE = 4096
ARMOR1 = 8192
ARMOR2 = 16384
ARMOR3 = 32768
SUPERHEALTH = 65536
KEY1 = 131072
KEY2 = 262144
INVISIBILITY = 524288
INVULNERABILITY = 1048576
SUIT = 2097152
QUAD = 4194304
SIGIL1 = (1<<28)
SIGIL2 = (1<<29)
SIGIL3 = (1<<30)
SIGIL4 = (1<<31)
class _UpdateFlags(enum.IntFlag):
MOREBITS = (1<<0)
ORIGIN1 = (1<<1)
ORIGIN2 = (1<<2)
ORIGIN3 = (1<<3)
ANGLE2 = (1<<4)
STEP = (1<<5)
FRAME = (1<<6)
SIGNAL = (1<<7)
ANGLE1 = (1<<8)
ANGLE3 = (1<<9)
MODEL = (1<<10)
COLORMAP = (1<<11)
SKIN = (1<<12)
EFFECTS = (1<<13)
LONGENTITY = (1<<14)
# protocol 666 flags
EXTEND1 = (1<<15)
ALPHA = (1<<16)
FRAME2 = (1<<17)
MODEL2 = (1<<18)
LERPFINISH = (1<<19)
SCALE = (1<<20)
UNUSED21 = (1<<21)
UNUSED22 = (1<<22)
EXTEND2 = (1<<23)
@classmethod
def fitzquake_flags(cls):
return (cls.ALPHA | cls.FRAME2 | cls.MODEL2 | cls.LERPFINISH | cls.SCALE |
cls.UNUSED21 | cls.UNUSED22)
class _ClientDataFlags(enum.IntFlag):
VIEWHEIGHT = 1<<0
IDEALPITCH = 1<<1
PUNCH1 = 1<<2
PUNCH2 = 1<<3
PUNCH3 = 1<<4
VELOCITY1 = 1<<5
VELOCITY2 = 1<<6
VELOCITY3 = 1<<7
UNUSED8 = 1<<8
ITEMS = 1<<9
ONGROUND = 1<<10
INWATER = 1<<11
WEAPONFRAME = 1<<12
ARMOR = 1<<13
WEAPON = 1<<14
# protocol 666 flags
EXTEND1 = 1<<15
WEAPON2 = 1<<16
ARMOR2 = 1<<17
AMMO2 = 1<<18
SHELLS2 = 1<<19
NAILS2 = 1<<20
ROCKETS2 = 1<<21
CELLS2 = 1<<22
EXTEND2 = 1<<23
WEAPONFRAME2 = 1<<24
WEAPONALPHA = 1<<25
UNUSED26 = 1<<26
UNUSED27 = 1<<27
UNUSED28 = 1<<28
UNUSED29 = 1<<29
UNUSED30 = 1<<30
EXTEND3 = 1<<31
@classmethod
def fitzquake_flags(cls):
return (cls.EXTEND1 | cls.WEAPON2 | cls.ARMOR2 | cls.AMMO2 | cls.SHELLS2 | cls.NAILS2 |
cls.ROCKETS2 | cls.CELLS2 | cls.EXTEND2 | cls.WEAPONFRAME2 | cls.WEAPONALPHA |
cls.UNUSED26 | cls.UNUSED27 | cls.UNUSED28 | cls.UNUSED29 | cls.UNUSED30 |
cls.EXTEND3)
class _SoundFlags(enum.IntFlag):
VOLUME = (1<<0)
ATTENUATION = (1<<1)
LOOPING = (1<<2)
# protocol 666 flags
LARGEENTITY = (1<<3)
LARGESOUND = (1<<4)
@classmethod
def fitzquake_flags(cls):
return _SoundFlags.LARGEENTITY | _SoundFlags.LARGESOUND
class _BaselineBits(enum.IntFlag):
LARGEMODEL = (1<<0)
LARGEFRAME = (1<<1)
ALPHA = (1<<2)
_MESSAGE_CLASSES = {}
def _register_server_message(cls):
_MESSAGE_CLASSES[cls.msg_type] = cls
_DEFAULT_VIEW_HEIGHT = 22
_DEFAULT_SOUND_PACKET_ATTENUATION = 1.0
_DEFAULT_SOUND_PACKET_VOLUME = 255
class ServerMessage:
protocols = set(ProtocolVersion)
field_names = None
@classmethod
@functools.lru_cache(None)
def _get_sig(cls):
return inspect.Signature([inspect.Parameter(n, inspect.Parameter.POSITIONAL_OR_KEYWORD)
for n in cls.field_names])
def __init__(self, *args, **kwargs):
bound_args = self._get_sig().bind(*args, **kwargs)
for key, val in bound_args.arguments.items():
setattr(self, key, val)
def __repr__(self):
return "{}({})".format(
self.__class__.__name__,
", ".join("{}={!r}".format(k, getattr(self, k)) for k in self.field_names))
@classmethod
def _parse_struct(cls, fmt, m):
size = struct.calcsize(fmt)
return struct.unpack(fmt, m[:size]), m[size:]
@classmethod
def _parse_string(cls, m):
if b'\0' not in m:
raise MalformedNetworkData('Null terminator not found')
idx = m.index(b'\0')
return m[:idx].decode('latin'), m[idx + 1:]
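    # Angles arrive as degrees (FLOATANGLE), 1/65536ths of a revolution
    # (SHORTANGLE) or classic 1/256th-of-a-revolution bytes; all are
    # converted to radians here.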
@classmethod
def _parse_angle(cls, m, protocol):
proto_flags = int(protocol.flags)
if proto_flags & int(ProtocolFlags.FLOATANGLE):
(angle,), m = cls._parse_struct("<f", m)
angle = math.pi * angle / 180
elif proto_flags & int(ProtocolFlags.SHORTANGLE):
(angle,), m = cls._parse_struct("<h", m)
angle = math.pi * angle / 32768
else:
angle, m = m[0], m[1:]
angle = angle * math.pi / 128.
return angle, m
@classmethod
def _parse_coord(cls, m, protocol):
proto_flags = int(protocol.flags)
if proto_flags & int(ProtocolFlags.FLOATCOORD):
(coord,), m = cls._parse_struct("<f", m)
elif proto_flags & int(ProtocolFlags.INT32COORD):
(coord,), m = cls._parse_struct("<i", m)
coord = coord / 16
        elif proto_flags & int(ProtocolFlags._24BITCOORD):
            # 24-bit coord: 16-bit integer part followed by a fractional byte.
            (high, low), m = cls._parse_struct("<hB", m)
            coord = high + low / 255
else:
(coord,), m = cls._parse_struct("<h", m)
coord = coord / 8
return coord, m
@classmethod
def _parse_angle_optional(cls, bit, flags, m, protocol):
if int(bit) & int(flags):
angle, m = cls._parse_angle(m, protocol)
else:
angle = None
return angle, m
@classmethod
def _parse_coord_optional(cls, bit, flags, m, protocol):
if int(bit) & int(flags):
coord, m = cls._parse_coord(m, protocol)
else:
coord = None
return coord, m
@classmethod
def _parse_tuple(cls, n, el_parser, m, protocol):
l = []
for _ in range(n):
x, m = el_parser(m, protocol)
l.append(x)
return tuple(l), m
@classmethod
def _parse_angles(cls, m, protocol):
return cls._parse_tuple(3, cls._parse_angle, m, protocol)
@classmethod
def _parse_coords(cls, m, protocol):
return cls._parse_tuple(3, cls._parse_coord, m, protocol)
@classmethod
def _parse_optional(cls, bit, flags, fmt, m, post_func=None, default=None):
if int(bit) & int(flags):
(val,), m = cls._parse_struct(fmt, m)
if post_func:
val = post_func(val)
return val, m
else:
return default, m
@classmethod
def _parse_upper_byte(cls, bit, flags, lower_byte, m):
upper_byte, m = cls._parse_optional(bit, flags, "<B", m)
if upper_byte is not None:
if lower_byte is None:
                raise MalformedNetworkData('Upper byte present but lower byte not present')
assert (lower_byte & 0xff) == lower_byte
out = (upper_byte << 8) | lower_byte
else:
out = lower_byte
return out, m
@classmethod
def parse_message(cls, m, protocol):
msg_type_int = m[0]
if msg_type_int & int(_UpdateFlags.SIGNAL):
msg_cls = ServerMessageUpdate
else:
try:
msg_type = ServerMessageType(msg_type_int)
except ValueError:
raise MalformedNetworkData("Invalid message type {}".format(msg_type_int))
try:
msg_cls = _MESSAGE_CLASSES[msg_type]
except KeyError:
raise MalformedNetworkData("No handler for message type {}".format(msg_type))
if protocol is not None and protocol.version not in msg_cls.protocols:
raise MalformedNetworkData(f"Received {msg_type} message but protocol is {protocol.version}")
m = m[1:]
return msg_cls.parse(m, protocol)
@classmethod
def parse(cls, m, protocol):
raise NotImplementedError
class StructServerMessage(ServerMessage):
@classmethod
def parse(cls, m, protocol):
vals, m = cls._parse_struct(cls.fmt, m)
return cls(**dict(zip(cls.field_names, vals))), m
class ServerMessageUpdate(ServerMessage):
msg_type = ServerMessageType.UPDATE
field_names = (
'entity_num',
'model_num',
'frame',
'colormap',
'skin',
'effects',
'origin',
'angle',
'step',
)
_size_cache = {}
_msg_cache = {}
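    # _size_cache maps a flags value to that update message's byte length;
    # parse() then uses the message's leading `size` bytes as the _msg_cache
    # key, mapping raw bytes straight to a previously parsed message.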
@classmethod
def clear_cache(cls):
cls._size_cache = {}
cls._msg_cache = {}
@classmethod
def _parse_flags_fast(cls, m, protocol):
"""Parse out flags but for efficiency don't convert to enum types.
In addition test against numbers rather than enum values to avoid the extra lookups.
"""
flags = m[0]
n = 1
if flags & 1: # MOREBITS
flags |= (m[n] << 8)
n += 1
if flags & (1 << 15): # EXTEND1
flags |= m[n] << 16
n += 1
if flags & (1 << 23): # EXTEND2
flags |= m[n] << 24
n += 1
return flags, m[n:]
@classmethod
def _parse_flags_safe(cls, m, protocol):
"""Like _parse_flags_fast but converts to enum type and does some checks.
Used when a cache miss occurs to check that _parse_flags_fast is returning the same value.
"""
flags, m = _UpdateFlags(m[0]), m[1:]
assert flags & _UpdateFlags.SIGNAL
if flags & _UpdateFlags.MOREBITS:
more_flags, m = m[0], m[1:]
flags |= (more_flags << 8)
if protocol.version != ProtocolVersion.NETQUAKE:
if flags & _UpdateFlags.EXTEND1:
extend1_flags, m = m[0], m[1:]
flags |= extend1_flags << 16
if flags & _UpdateFlags.EXTEND2:
extend2_flags, m = m[0], m[1:]
flags |= extend2_flags << 24
else:
fq_flags = flags & _UpdateFlags.fitzquake_flags()
if fq_flags:
raise MalformedNetworkData(f'{fq_flags} passed but protocol is {protocol}')
return flags, m
@classmethod
def _parse_no_cache(cls, flags, m, protocol):
(entity_num,), m = cls._parse_struct("<H" if flags & _UpdateFlags.LONGENTITY else "<B", m)
model_num, m = cls._parse_optional(_UpdateFlags.MODEL, flags, "<B", m)
frame, m = cls._parse_optional(_UpdateFlags.FRAME, flags, "<B", m)
colormap, m = cls._parse_optional(_UpdateFlags.COLORMAP, flags, "<B", m)
skin, m = cls._parse_optional(_UpdateFlags.SKIN, flags, "<B", m)
effects, m = cls._parse_optional(_UpdateFlags.EFFECTS, flags, "<B", m)
origin1, m = cls._parse_coord_optional(_UpdateFlags.ORIGIN1, flags, m, protocol)
angle1, m = cls._parse_angle_optional(_UpdateFlags.ANGLE1, flags, m, protocol)
origin2, m = cls._parse_coord_optional(_UpdateFlags.ORIGIN2, flags, m, protocol)
angle2, m = cls._parse_angle_optional(_UpdateFlags.ANGLE2, flags, m, protocol)
origin3, m = cls._parse_coord_optional(_UpdateFlags.ORIGIN3, flags, m, protocol)
angle3, m = cls._parse_angle_optional(_UpdateFlags.ANGLE3, flags, m, protocol)
origin = (origin1, origin2, origin3)
angle = (angle1, angle2, angle3)
if protocol.version != ProtocolVersion.NETQUAKE:
# TODO: Store alpha / scale / lerpfinish
alpha, m = cls._parse_optional(_UpdateFlags.ALPHA, flags, "<B", m)
scale, m = cls._parse_optional(_UpdateFlags.SCALE, flags, "<B", m)
frame, m = cls._parse_upper_byte(_UpdateFlags.FRAME2, flags, frame, m)
model_num, m = cls._parse_upper_byte(_UpdateFlags.MODEL2, flags, model_num, m)
lerp_finish, m = cls._parse_optional(_UpdateFlags.LERPFINISH, flags, "<B", m)
step = bool(flags & _UpdateFlags.STEP)
return cls(entity_num,
model_num,
frame,
colormap,
skin,
effects,
origin,
angle,
step), m, flags
@classmethod
def parse(cls, m, protocol):
int_flags, m_after_flags = cls._parse_flags_fast(m, protocol)
msg = None
size = cls._size_cache.get(int_flags)
if size is not None:
msg = cls._msg_cache.get(m[:size])
if msg is None:
flags, _ = cls._parse_flags_safe(m, protocol)
assert flags == int_flags, f"flags={flags} int_flags={int_flags}"
msg, m_after, flags = cls._parse_no_cache(flags, m_after_flags, protocol)
size = len(m) - len(m_after)
cls._size_cache[flags] = size
cls._msg_cache[m[:size]] = msg
return msg, m[size:]
class NoFieldsServerMessage(ServerMessage):
field_names = ()
@classmethod
def parse(cls, m, protocol):
return cls(), m
@_register_server_message
class ServerMessageNop(NoFieldsServerMessage):
msg_type = ServerMessageType.NOP
@_register_server_message
class ServerMessageFoundSecret(NoFieldsServerMessage):
msg_type = ServerMessageType.FOUNDSECRET
@_register_server_message
class ServerMessageBonusFlash(NoFieldsServerMessage):
protocols = {ProtocolVersion.FITZQUAKE}
msg_type = ServerMessageType.BF
@_register_server_message
class ServerMessageFog(ServerMessage):
field_names = ('density', 'color', 'time')
protocols = {ProtocolVersion.FITZQUAKE}
msg_type = ServerMessageType.FOG
@classmethod
def parse(cls, m, protocol):
(density, r, g, b, time_short), m = cls._parse_struct("<BBBBH", m)
return cls(density, (r, g, b), time_short / 100.), m
@_register_server_message
class ServerMessagePrint(ServerMessage):
field_names = ('string',)
msg_type = ServerMessageType.PRINT
@classmethod
def parse(cls, m, protocol):
s, m = cls._parse_string(m)
return cls(s), m
@_register_server_message
class ServerMessageCenterPrint(ServerMessage):
field_names = ('string',)
msg_type = ServerMessageType.CENTERPRINT
@classmethod
def parse(cls, m, protocol):
s, m = cls._parse_string(m)
return cls(s), m
@_register_server_message
class ServerMessageCutScene(ServerMessage):
field_names = ('string',)
msg_type = ServerMessageType.CUTSCENE
@classmethod
def parse(cls, m, protocol):
s, m = cls._parse_string(m)
return cls(s), m
@_register_server_message
class ServerMessageStuffText(ServerMessage):
field_names = ('string',)
msg_type = ServerMessageType.STUFFTEXT
@classmethod
def parse(cls, m, protocol):
s, m = cls._parse_string(m)
return cls(s), m
@_register_server_message
class ServerMessageSkybox(ServerMessage):
protocols = {ProtocolVersion.FITZQUAKE}
    field_names = ('string',)
msg_type = ServerMessageType.SKYBOX
@classmethod
def parse(cls, m, protocol):
s, m = cls._parse_string(m)
return cls(s), m
class _SpawnStaticSoundBase(ServerMessage):
field_names = ("origin", "sound_num", "vol", "atten")
@classmethod
def _parse_generic(cls, m, protocol, version):
origin, m = cls._parse_coords(m, protocol)
fmt = "<HBB" if version == 2 else "<BBB"
(sound_num, vol, atten), m = cls._parse_struct(fmt, m)
return cls(origin, sound_num, vol, atten), m
@_register_server_message
class ServerMessageSpawnStaticSound(_SpawnStaticSoundBase):
msg_type = ServerMessageType.SPAWNSTATICSOUND
@classmethod
def parse(cls, m, protocol):
return cls._parse_generic(m, protocol, 1)
@_register_server_message
class ServerMessageSpawnStaticSound2(_SpawnStaticSoundBase):
msg_type = ServerMessageType.SPAWNSTATICSOUND2
@classmethod
def parse(cls, m, protocol):
return cls._parse_generic(m, protocol, 2)
@_register_server_message
class ServerMessageCdTrack(StructServerMessage):
fmt = "<BB"
field_names = ("track", "loop")
msg_type = ServerMessageType.CDTRACK
@_register_server_message
class ServerMessageSetView(StructServerMessage):
fmt = "<H"
field_names = ("viewentity",)
msg_type = ServerMessageType.SETVIEW
@_register_server_message
class ServerMessageSignOnNum(StructServerMessage):
fmt = "<B"
field_names = ("num",)
msg_type = ServerMessageType.SIGNONNUM
@_register_server_message
class ServerMessageSetPause(StructServerMessage):
fmt = "<B"
field_names = ("paused",)
msg_type = ServerMessageType.SETPAUSE
class _SpawnBaselineBase(ServerMessage):
@classmethod
def _parse_generic(cls, m, protocol, include_entity_num, version):
if include_entity_num:
(entity_num,), m = cls._parse_struct("<H", m)
if version == 2:
(bits,), m = cls._parse_struct("<B", m)
bits = _BaselineBits(bits)
fmt = (f"{'H' if bits & _BaselineBits.LARGEMODEL else 'B'}"
f"{'H' if bits & _BaselineBits.LARGEFRAME else 'B'}"
"BB")
else:
bits = _BaselineBits(0)
fmt = "<BBBB"
(model_num, frame, colormap, skin), m = cls._parse_struct(fmt, m)
origin, angles = [], []
for _ in range(3):
o, m = cls._parse_coord(m, protocol)
a, m = cls._parse_angle(m, protocol)
origin.append(o)
angles.append(a)
if bits & _BaselineBits.ALPHA:
# TODO: Store alpha
(alpha,), m = cls._parse_struct("<B", m)
if include_entity_num:
return cls(entity_num, model_num, frame, colormap, skin, tuple(origin), tuple(angles)), m
else:
return cls(model_num, frame, colormap, skin, tuple(origin), tuple(angles)), m
@_register_server_message
class ServerMessageSpawnBaseline(_SpawnBaselineBase):
field_names = ("entity_num", "model_num", "frame", "colormap", "skin", "origin", "angles")
msg_type = ServerMessageType.SPAWNBASELINE
@classmethod
def parse(cls, m, protocol):
return cls._parse_generic(m, protocol, True, 1)
@_register_server_message
class ServerMessageSpawnBaseline2(_SpawnBaselineBase):
protocols = {ProtocolVersion.FITZQUAKE, ProtocolVersion.RMQ}
field_names = ("entity_num", "model_num", "frame", "colormap", "skin", "origin", "angles")
msg_type = ServerMessageType.SPAWNBASELINE2
@classmethod
def parse(cls, m, protocol):
return cls._parse_generic(m, protocol, True, 2)
@_register_server_message
class ServerMessageSpawnStatic(_SpawnBaselineBase):
field_names = ("model_num", "frame", "colormap", "skin", "origin", "angles")
msg_type = ServerMessageType.SPAWNSTATIC
@classmethod
def parse(cls, m, protocol):
return cls._parse_generic(m, protocol, False, 1)
@_register_server_message
class ServerMessageSpawnStatic2(_SpawnBaselineBase):
protocols = {ProtocolVersion.FITZQUAKE, ProtocolVersion.RMQ}
field_names = ("model_num", "frame", "colormap", "skin", "origin", "angles")
msg_type = ServerMessageType.SPAWNSTATIC2
@classmethod
def parse(cls, m, protocol):
return cls._parse_generic(m, protocol, False, 2)
@_register_server_message
class ServerMessageTime(StructServerMessage):
fmt = "<f"
field_names = ("time",)
msg_type = ServerMessageType.TIME
@_register_server_message
class ServerMessageUpdateName(ServerMessage):
msg_type = ServerMessageType.UPDATENAME
field_names = ('client_num', 'name')
@classmethod
def parse(cls, m, protocol):
client_num, m = m[0], m[1:]
name, m = cls._parse_string(m)
return cls(client_num, name), m
@_register_server_message
class ServerMessageUpdateFrags(StructServerMessage):
fmt = "<BH"
field_names = ("client_num", "count")
msg_type = ServerMessageType.UPDATEFRAGS
@_register_server_message
class ServerMessageUpdateColors(StructServerMessage):
fmt = "<BB"
field_names = ("client_num", "color")
msg_type = ServerMessageType.UPDATECOLORS
@_register_server_message
class ServerMessageLightStyle(ServerMessage):
field_names = ('index', 'style')
msg_type = ServerMessageType.LIGHTSTYLE
@classmethod
def parse(cls, m, protocol):
index, m = m[0], m[1:]
style, m = cls._parse_string(m)
return cls(index, style), m
@_register_server_message
class ServerMessageUpdateStat(StructServerMessage):
fmt = "<BI"
field_names = ('index', 'value')
msg_type = ServerMessageType.UPDATESTAT
@_register_server_message
class ServerMessageSetAngle(ServerMessage):
field_names = ('view_angles',)
msg_type = ServerMessageType.SETANGLE
@classmethod
def parse(cls, m, protocol):
view_angles, m = cls._parse_angles(m, protocol)
return cls(view_angles), m
@_register_server_message
class ServerMessageServerInfo(ServerMessage):
field_names = ('protocol', 'max_clients', 'game_type', 'level_name', 'models', 'sounds')
msg_type = ServerMessageType.SERVERINFO
@classmethod
def _parse_string_list(cls, m):
l = []
while True:
s, m = cls._parse_string(m)
if not s:
break
l.append(s)
return l, m
@classmethod
def parse(cls, m, protocol):
(protocol_version,), m = cls._parse_struct("<I", m)
protocol_version = ProtocolVersion(protocol_version)
if protocol_version == ProtocolVersion.RMQ:
(protocol_flags,), m = cls._parse_struct("<I", m)
protocol_flags = ProtocolFlags(protocol_flags)
else:
protocol_flags = ProtocolFlags(0)
next_protocol = Protocol(protocol_version, protocol_flags)
(max_clients, game_type), m = cls._parse_struct("<BB", m)
level_name, m = cls._parse_string(m)
models, m = cls._parse_string_list(m)
sounds, m = cls._parse_string_list(m)
return cls(next_protocol, max_clients, game_type, level_name, models, sounds), m
@_register_server_message
class ServerMessageClientData(ServerMessage):
field_names = (
'view_height',
'ideal_pitch',
'punch_angles',
'm_velocity',
'items',
'on_ground',
'in_water',
'weapon_frame',
'armor',
'weapon_model_index',
'health',
'ammo',
'shells',
'nails',
'rockets',
'cells',
'active_weapon',
)
msg_type = ServerMessageType.CLIENTDATA
@classmethod
def parse(cls, m, protocol):
(flags_int,), m = cls._parse_struct("<H", m)
flags = _ClientDataFlags(flags_int)
if protocol.version != ProtocolVersion.NETQUAKE:
if flags & _ClientDataFlags.EXTEND1:
extend1_flags, m = m[0], m[1:]
flags |= extend1_flags << 16
            if flags & _ClientDataFlags.EXTEND2:
                extend2_flags, m = m[0], m[1:]
                flags |= extend2_flags << 24
else:
fq_flags = flags & _ClientDataFlags.fitzquake_flags()
if fq_flags:
raise MalformedNetworkData(f'{fq_flags} passed but protocol is {protocol}')
view_height, m = cls._parse_optional(_ClientDataFlags.VIEWHEIGHT, flags, "<B", m,
default=_DEFAULT_VIEW_HEIGHT)
ideal_pitch, m = cls._parse_optional(_ClientDataFlags.IDEALPITCH, flags, "<B", m, default=0)
fix_velocity = lambda v: v * 16
punch1, m = cls._parse_optional(_ClientDataFlags.PUNCH1, flags, "<B", m, default=0)
m_velocity1, m = cls._parse_optional(_ClientDataFlags.VELOCITY1, flags, "<b", m, fix_velocity,
default=0)
punch2, m = cls._parse_optional(_ClientDataFlags.PUNCH2, flags, "<B", m, default=0)
m_velocity2, m = cls._parse_optional(_ClientDataFlags.VELOCITY2, flags, "<b", m, fix_velocity,
default=0)
punch3, m = cls._parse_optional(_ClientDataFlags.PUNCH3, flags, "<B", m, default=0)
m_velocity3, m = cls._parse_optional(_ClientDataFlags.VELOCITY3, flags, "<b", m, fix_velocity,
default=0)
punch_angles = (punch1, punch2, punch3)
m_velocity = (m_velocity1, m_velocity2, m_velocity3)
(items_int,), m = cls._parse_struct("<I", m)
items = ItemFlags(items_int)
on_ground = bool(flags & _ClientDataFlags.ONGROUND)
in_water = bool(flags & _ClientDataFlags.INWATER)
weapon_frame, m = cls._parse_optional(_ClientDataFlags.WEAPONFRAME, flags, "<B", m, default=0)
armor, m = cls._parse_optional(_ClientDataFlags.ARMOR, flags, "<B", m, default=0)
weapon_model_index, m = cls._parse_optional(_ClientDataFlags.WEAPON, flags, "<B", m, default=0)
(health, ammo, shells, nails, rockets, cells, active_weapon), m = cls._parse_struct("<HBBBBBB", m)
active_weapon = ItemFlags(active_weapon)
if protocol.version != ProtocolVersion.NETQUAKE:
weapon_model_index, m = cls._parse_upper_byte(_ClientDataFlags.WEAPON2, flags, weapon_model_index, m)
armor, m = cls._parse_upper_byte(_ClientDataFlags.ARMOR2, flags, armor, m)
ammo, m = cls._parse_upper_byte(_ClientDataFlags.AMMO2, flags, ammo, m)
shells, m = cls._parse_upper_byte(_ClientDataFlags.SHELLS2, flags, shells, m)
nails, m = cls._parse_upper_byte(_ClientDataFlags.NAILS2, flags, nails, m)
rockets, m = cls._parse_upper_byte(_ClientDataFlags.ROCKETS2, flags, rockets, m)
cells, m = cls._parse_upper_byte(_ClientDataFlags.CELLS2, flags, cells, m)
weapon_frame, m = cls._parse_upper_byte(_ClientDataFlags.WEAPONFRAME2, flags, weapon_frame, m)
# TODO: Store weapon alpha
weapon_alpha, m = cls._parse_optional(_ClientDataFlags.WEAPONALPHA, flags, "<B", m)
return cls(
view_height,
ideal_pitch,
punch_angles,
m_velocity,
items,
on_ground,
in_water,
weapon_frame,
armor,
weapon_model_index,
health,
ammo,
shells,
nails,
rockets,
cells,
active_weapon
), m
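# Hedged sketch (editor's note, not in the original source): each
# _parse_optional call above consumes bytes only when its flag bit is set,
# e.g. with flags == _ClientDataFlags.VIEWHEIGHT and m == b'\x10rest':
#   cls._parse_optional(_ClientDataFlags.VIEWHEIGHT, flags, "<B", m)
#   # -> (16, b'rest')
# whereas an unset bit returns the supplied default and leaves m untouched.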
@_register_server_message
class ServerMessageSound(ServerMessage):
field_names = ('volume', 'attenuation', 'entity_num', 'channel', 'sound_num', 'pos')
msg_type = ServerMessageType.SOUND
@classmethod
def parse(cls, m, protocol):
flags, m = _SoundFlags(m[0]), m[1:]
volume, m = cls._parse_optional(_SoundFlags.VOLUME, flags, "<B", m,
default=_DEFAULT_SOUND_PACKET_VOLUME)
attenuation, m = cls._parse_optional(_SoundFlags.ATTENUATION, flags, "<B", m, lambda b: b / 64.,
default=_DEFAULT_SOUND_PACKET_ATTENUATION)
if protocol.version == ProtocolVersion.NETQUAKE:
fq_flags = flags & _SoundFlags.fitzquake_flags()
if fq_flags:
raise MalformedNetworkData(f'{fq_flags} passed but protocol is {protocol}')
if flags & _SoundFlags.LARGEENTITY:
(entity_num, channel), m = cls._parse_struct("<HB", m)
else:
(t,), m = cls._parse_struct("<H", m)
entity_num = t >> 3
channel = t & 7
(sound_num,), m = cls._parse_struct("<H" if flags & _SoundFlags.LARGESOUND else "<B", m)
pos, m = cls._parse_coords(m, protocol)
return cls(volume, attenuation, entity_num, channel, sound_num, pos), m
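# Hedged sketch (editor's note): without LARGEENTITY the entity number and
# channel share one 16-bit field, e.g. entity 5 on channel 2:
#   t = (5 << 3) | 2   # 42
#   t >> 3, t & 7      # -> (5, 2)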
@_register_server_message
class ServerMessageParticle(ServerMessage):
field_names = ('origin', 'direction', 'count', 'color')
msg_type = ServerMessageType.PARTICLE
@classmethod
def parse(cls, m, protocol):
origin, m = cls._parse_coords(m, protocol)
direction, m = cls._parse_struct("<bbb", m)
direction = tuple(x / 16. for x in direction)
count, m = m[0], m[1:]
if count == 255:
count = 1024
color, m = m[0], m[1:]
return cls(origin, direction, count, color), m
@_register_server_message
class ServerMessageTempEntity(ServerMessage):
field_names = ('temp_entity_type', 'entity_num', 'origin', 'end', 'color_start', 'color_length')
msg_type = ServerMessageType.TEMP_ENTITY
@classmethod
def parse(cls, m, protocol):
temp_entity_type, m = TempEntityTypes(m[0]), m[1:]
if temp_entity_type in (TempEntityTypes.LIGHTNING1, TempEntityTypes.LIGHTNING2, TempEntityTypes.LIGHTNING3,
TempEntityTypes.BEAM):
(entity_num,), m = cls._parse_struct("<H", m)
origin, m = cls._parse_coords(m, protocol)
end, m = cls._parse_coords(m, protocol)
else:
origin, m = cls._parse_coords(m, protocol)
end = None
entity_num = None
if temp_entity_type == TempEntityTypes.EXPLOSION2:
color_start, color_length, m = m[0], m[1], m[2:]
else:
color_start, color_length = None, None
return cls(temp_entity_type, entity_num, origin, end, color_start, color_length), m
@_register_server_message
class ServerMessageKilledMonster(NoFieldsServerMessage):
msg_type = ServerMessageType.KILLEDMONSTER
@_register_server_message
class ServerMessageIntermission(NoFieldsServerMessage):
msg_type = ServerMessageType.INTERMISSION
@_register_server_message
class ServerMessageFinale(ServerMessage):
field_names = ('string',)
msg_type = ServerMessageType.FINALE
@classmethod
def parse(cls, m, protocol):
s, m = cls._parse_string(m)
return cls(s), m
@_register_server_message
class ServerMessageDisconnect(NoFieldsServerMessage):
msg_type = ServerMessageType.DISCONNECT
@_register_server_message
class ServerMessageDamage(ServerMessage):
field_names = ('armor', 'blood', 'origin')
msg_type = ServerMessageType.DAMAGE
@classmethod
def parse(cls, m, protocol):
armor, blood, m = m[0], m[1], m[2:]
origin, m = cls._parse_coords(m, protocol)
return cls(armor, blood, origin), m
def read_demo_file(f):
while _read(f, 1) != b'\n':
pass
demo_header_fmt = "<Ifff"
demo_header_size = struct.calcsize(demo_header_fmt)
protocol = None
while True:
d = f.read(demo_header_size)
if len(d) == 0:
break
if len(d) < demo_header_size:
raise MalformedNetworkData
msg_len, *view_angles = struct.unpack(demo_header_fmt, d)
msg = _read(f, msg_len)
while msg:
parsed, msg = ServerMessage.parse_message(msg, protocol)
if parsed.msg_type == ServerMessageType.SERVERINFO:
protocol = parsed.protocol
yield not bool(msg), view_angles, parsed
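# Usage sketch (editor's addition; the demo file path is a placeholder):
#   with open('demo1.dem', 'rb') as fh:
#       for last_in_packet, view_angles, msg in read_demo_file(fh):
#           print(last_in_packet, view_angles, msg)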
def clear_cache():
"""Some messages are cached for efficient parsing of repeated messages.
Call this function to free up memory used by this cache.
"""
ServerMessageUpdate.clear_cache()
def demo_parser_main():
def f():
import sys
with open(sys.argv[1], "rb") as f:
for msg in read_demo_file(f):
if do_print:
print(msg)
do_print = bool(int(os.environ.get('PYQ_PRINT', '1')))
if int(os.environ.get('PYQ_PROFILE', '0')):
import cProfile
cProfile.runctx('f()', globals(), locals(), 'stats')
else:
f() | pyquake/proto.py | 0.387922 | 0.109921 |
from tkinter import *
import math
import polyomino as _mino
SYM_OPTS = ["free", "one-sided", "fixed"]
SYM_COLORS = {'|-\\/%@+XO': "tan",
'|-%+': "magenta",
'\\/%X': "yellow",
'%@': "cyan",
'|': "red",
'-': "red",
'\\': "green",
'/': "green",
'%': "blue",
'?': "gray"}
CANVAS_WIDTH = 800
CANVAS_HEIGHT = 600
def draw_mino(canvas, mino, x, y, size, fill):
"""Draw the polyomino on the specified canvas objecct."""
for i, j in mino:
canvas.create_rectangle([x + size*i, y+size*j,
x+size*(i+1), y+size*(j+1)],
fill=fill)
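# Hedged example (editor's sketch): draw a horizontal domino at the canvas
# origin with 10-pixel cells:
#   draw_mino(canvas, [(0, 0), (1, 0)], x=0, y=0, size=10, fill="gray")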
class PolyominoApp(Frame):
def __init__(self, master):
main = Frame(master)
main.pack()
# Size
textframe = Frame(main)
textframe.pack()
Label(textframe, text="Size: ").pack(side=LEFT)
self.scale_size = Scale(textframe, from_=0, to=11, orient=HORIZONTAL)
self.scale_size.pack(side=LEFT)
# Make the symmetry-choice buttons
symframe = Frame(main)
symframe.pack()
self.sym_value = IntVar()
self.sym_value.set(0)
for index, opt in enumerate(SYM_OPTS):
button = Radiobutton(symframe,
text=opt,
variable=self.sym_value,
value=index)
button.pack(anchor=W, side=LEFT)
# Make the "highlight symmetries" checkbox
self.symcolor_value = IntVar()
self.check_symcolor = Checkbutton(main, text="Highlight symmetries",
variable=self.symcolor_value)
self.check_symcolor.pack()
# Make the submit button
self.btn_submit = Button(main, text="Generate", command=self.submit)
self.btn_submit.pack()
# Canvas to show the results
canvasframe = Frame(main)
canvasframe.pack()
yscroll = Scrollbar(canvasframe)
yscroll.pack(side=RIGHT, fill=Y)
self.canvas = Canvas(canvasframe, bg="white",
width=CANVAS_WIDTH, height=CANVAS_HEIGHT,
scrollregion=(0,0,CANVAS_WIDTH,CANVAS_HEIGHT*2),
yscrollcommand=yscroll.set)
self.canvas.pack(side=LEFT, fill=BOTH)
yscroll.config(command=self.canvas.yview)
def submit(self):
# clear the canvas
self.canvas.delete(ALL)
n = int(self.scale_size.get())
sym = self.sym_value.get()
symcolor = self.symcolor_value.get()
if sym == 0:
minos = sorted(_mino.free(_mino.generate(n)), key=_mino.mino_key)
elif sym == 1:
minos = sorted(_mino.one_sided(_mino.generate(n)), key=_mino.mino_key)
else:
minos = sorted(_mino.generate(n), key=_mino.mino_key)
text = ("There are {0} {1} polyominoes of order {2}".format(
len(minos), SYM_OPTS[sym], n))
self.canvas.create_text(CANVAS_WIDTH//2, 25, text=text)
# Determine sizes
size = 5
padding = 2
margin = 40
minos_per_line = (CANVAS_WIDTH - margin * 2) // ((n + padding) * size)
ypos = margin
scroll_height = 2*margin + (n+padding)*size*math.ceil(len(minos)/minos_per_line)
self.canvas.config(scrollregion=(0,0,CANVAS_WIDTH, scroll_height))
while minos:
for i in range(minos_per_line):
if not minos:
break
xpos = margin + ((n + padding) * size) * i
mino = minos.pop()
draw_mino(self.canvas, mino,
xpos, ypos, size,
fill=(SYM_COLORS[mino.symmetry()]
if symcolor else "gray"))
ypos += (n+padding) * size
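# Layout sketch (editor's note): each polyomino occupies an
# (n + padding) * size square, so the 800-pixel canvas with a 40-pixel
# margin fits (800 - 80) // ((n + 2) * 5) minos per row, e.g. 20 for n = 5.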
root = Tk()
root.wm_title("Polyomino App")
app = PolyominoApp(root)
root.mainloop() | polyomino_app.py | 0.538255 | 0.159446 |
import os
import time
import pandas as pd
import numpy as np
import json
from hydroDL import kPath, utils
from hydroDL.data import usgs, gageII, gridMET, ntn, transform
from hydroDL.master import basins
from hydroDL.app import waterQuality
"""
instead of saving time series by rho, save the full time series here.
f and q will be saved in full matirx
c will saved in sparse matrix
"""
# load sites
dirInv = os.path.join(kPath.dirData, 'USGS', 'inventory')
fileSiteNo = os.path.join(dirInv, 'siteSel', 'Q90ref')
siteNoLst = pd.read_csv(fileSiteNo, header=None, dtype=str)[0].tolist()
freq = 'D'
nFill = 5
sdStr = '1979-01-01'
edStr = '2019-12-31'
# ts data
varF = gridMET.varLst
varQ = usgs.varQ
varG = gageII.lstWaterQuality
# varC=
# gageII
tabG = gageII.readData(varLst=varG, siteNoLst=siteNoLst)
tabG = gageII.updateCode(tabG)
tR = pd.date_range(np.datetime64(sdStr), np.datetime64(edStr))
fLst, qLst, gLst = [list() for x in range(3)]
infoLst = list()
t0 = time.time()
for i, siteNo in enumerate(siteNoLst):
t1 = time.time()
varLst = varQ+varF
df = waterQuality.readSiteTS(siteNo, varLst=varLst, freq=freq)
# streamflow
tempQ = pd.DataFrame({'date': tR}).set_index('date').join(df[varQ])
qLst.append(tempQ.values)
# forcings
tempF = pd.DataFrame({'date': tR}).set_index('date').join(df[varF])
tempF = tempF.interpolate(
limit=nFill, limit_direction='both', limit_area='inside')
fLst.append(tempF.values)
# geog
gLst.append(tabG.loc[siteNo].values)
t2 = time.time()
print('{} on site {} reading {:.3f} total {:.3f}'.format(
i, siteNo, t2-t1, t2-t0))
f = np.stack(fLst, axis=-1).swapaxes(1, 2).astype(np.float32)
q = np.stack(qLst, axis=-1).swapaxes(1, 2).astype(np.float32)
g = np.stack(gLst, axis=-1).swapaxes(0, 1).astype(np.float32)
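# Shape note (editor's sketch): after the stacking and axis swaps above,
# f and q are (time, site, variable) arrays and g is (site, variable), e.g.
#   f.shape  # -> (len(tR), len(siteNoLst), len(varF))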
# save
caseName = 'Q90'
saveFolder = os.path.join(kPath.dirWQ, 'trainDataFull', caseName)
if not os.path.exists(saveFolder):
os.mkdir(saveFolder)
np.save(os.path.join(saveFolder, 'Q'), q)
np.save(os.path.join(saveFolder, 'F'), f)
np.save(os.path.join(saveFolder, 'G'), g)
dictData = dict(name=caseName, varG=varG, varQ=varQ, varF=varF,
sd=sdStr, ed=edStr, siteNoLst=siteNoLst)
with open(os.path.join(saveFolder, 'info')+'.json', 'w') as fp:
json.dump(dictData, fp, indent=4) | app/streamflow/prep/wrap-test.py | 0.239616 | 0.170888 |
import math
from datetime import timedelta
from datetime import datetime
MAX_GEE_PIXELS_DOWNLOAD = 1048576
GEE_ERROR_PLACEHOLDER = "ImageCollection.getRegion: Too many values: "
__all__ = ('tile_coordinates', 'retrieve_max_pixel_count_from_pattern',
'cmp_coords', 'get_date_interval_array', 'make_polygon')
def make_polygon(top_left, bottom_right):
'''Given two (lon, lat) coordinates of both the top left and bottom right corner of a polygon, return the list of corner coordinates of this polygon
Parameters
----------
top_left : list of int or tuple of int
Top Left coordinates of the polygon
bottom_right : list of int or tuple of int
Bottom right coordinates of the polygon
Returns
-------
list
2-D list of the 5 coordinates needed to create a rectangular Polygon ``[top_left, top_right, bottom_right, bottom_left, top_left]``.
'''
return [
list(top_left),
[bottom_right[0], top_left[1]],
list(bottom_right),
[top_left[0], bottom_right[1]],
list(top_left)
]
def tile_coordinates(total_count_of_pixels, coordinates, max_gee=MAX_GEE_PIXELS_DOWNLOAD):
'''Given a coordinates array describing a Polygon and the count of pixels within that polygon, tile the polygon into a grid of sub-Polygons whose individual pixel counts fit within the max_gee limit given as a parameter.
Parameters
----------
total_count_of_pixels : int
Total number of pixels of the designated area
coordinates : array of array of floats
Can be a 5-sized list of every coordinates defining the polygon ``[[long1, lat1],[long2, lat1]...,[long1, lat1]]`` or a 2-sized list of coordinates defining the top left and bottom right corner of the Polygon ``[[long1, lat1],[long2, lat2]]``
max_gee : int, optional
Total number of points allowed for one data query. Default: 1048576
Returns
-------
list
3-dimensional list of coordinates with pixel count inferior or equal to the maximum GEE threshold (shape: ``(number of images, number of coordinates per image, 2)``)
'''
assert(len(coordinates) == 2 or len(coordinates) == 5)
list_of_coordinates = []
if len(coordinates) == 2:
tmp_c = make_polygon(coordinates[0], coordinates[1])
else:
tmp_c = coordinates
if (total_count_of_pixels < max_gee):
return [tmp_c]
# The coordinate polygon will be tiled in `grid_length * grid_length` sub-Polygons
grid_length = int(math.ceil(math.sqrt(total_count_of_pixels/max_gee)))
original_polygon_width = tmp_c[1][0] - tmp_c[0][0]
original_polygon_height = tmp_c[3][1] - tmp_c[0][1]
for i in range(grid_length):
for j in range(grid_length):
list_of_coordinates.append(
make_polygon(
[tmp_c[0][0]+i*original_polygon_width/grid_length,
tmp_c[0][1]+j*original_polygon_height/grid_length],
[tmp_c[0][0]+(i+1)*original_polygon_width/grid_length,
tmp_c[0][1]+(j+1)*original_polygon_height/grid_length]
)
)
return list_of_coordinates
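# Hedged example (editor's sketch): a polygon holding four times the GEE
# pixel limit is tiled into a 2x2 grid:
#   tiles = tile_coordinates(4 * MAX_GEE_PIXELS_DOWNLOAD,
#                            [[0.0, 1.0], [1.0, 0.0]])
#   len(tiles)  # -> 4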
def retrieve_max_pixel_count_from_pattern(error_str):
'''Given an input getRegion error from GEE, extract the provided points count.
Parameters
----------
error_str : str
the str text of the GEE error (e.g. the function called on ``"ImageCollection.getRegion: Too many values: x points ..."`` will output x)
Returns
-------
int
Returns the number of points specified in the input error message
'''
try:
return int(error_str.split(GEE_ERROR_PLACEHOLDER)[1].split(" points")[0])
except (IndexError, ValueError):
raise ValueError("No max pixels value found")
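# Hedged example (editor's sketch):
#   retrieve_max_pixel_count_from_pattern(
#       "ImageCollection.getRegion: Too many values: 1327104 points ...")
#   # -> 1327104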
def cmp_coords(a, b):
'''
Given two coordinates dict a and b, compare which one is closer to the North-Eastern direction
Parameters
----------
a : dict
dict with keys ``"lon"`` and ``"lat"``
b : dict
dict with keys ``"lon"`` and ``"lat"``
Returns
-------
int
**-1** if ``a > b``, **1** if ``a < b``, **0** if ``a == b``
'''
if a["lat"] != b["lat"]:
return 1 if a["lat"] < b["lat"] else -1
elif a["lon"] != b["lon"]:
return 1 if a["lon"] < b["lon"] else -1
else:
return 0
def define_image_shape(pixel_values):
"""Define image shape based on number pixel and latitude values
Parameters
----------
pixel_values : list of dict
List of retrieved pixel values along with latitude and
longitude coordinates
Returns
-------
(width, height) :
A tuple with the width and height of the requested area of interest
"""
# count pixels with common latitude until it changes to know the image width
width = 1
while pixel_values[width]["lat"] == pixel_values[0]["lat"]:
width += 1
# deduce the image height from its width
height = len(pixel_values) // width
return (width, height)
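# Hedged example (editor's sketch): four pixels forming a 2x2 grid, listed
# row by row from the northern row:
#   pixels = [{"lat": 1.0, "lon": 0.0}, {"lat": 1.0, "lon": 1.0},
#             {"lat": 0.0, "lon": 0.0}, {"lat": 0.0, "lon": 1.0}]
#   define_image_shape(pixels)  # -> (2, 2)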
def get_date_interval_array(start_date, end_date, day_timedelta=1):
'''Build a list of day intervals of size ``day_timedelta``, created iteratively between ``start_date`` and ``end_date``.
Parameters
----------
start_date : datetime.datetime
first date time of the array
end_date : datetime.datetime
last date of the array
day_timedelta : int
size, in days, of every interval
'''
assert(start_date is not None and end_date is not None and day_timedelta is not None)
assert(start_date < end_date)
assert(type(day_timedelta) == int)
date_intervals = []
tmp_date = start_date
while tmp_date < end_date:
date_intervals.append((tmp_date.strftime(
"%Y-%m-%d"), (tmp_date + timedelta(days=day_timedelta)).strftime("%Y-%m-%d")))
tmp_date += timedelta(days=day_timedelta)
return date_intervals
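# Hedged example (editor's sketch): two one-day intervals
#   get_date_interval_array(datetime(2020, 1, 1), datetime(2020, 1, 3))
#   # -> [('2020-01-01', '2020-01-02'), ('2020-01-02', '2020-01-03')]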
def print_verbose(msg, verbose, expected_verbose):
if verbose == 0:
pass
elif verbose == 1 and expected_verbose >= 1:
print(msg)
elif verbose == 2 and expected_verbose >= 1:
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print(f"[INFO] t={current_time} | {msg}") | geesarfetcher/utils/__init__.py | import math
from datetime import timedelta
from datetime import datetime
MAX_GEE_PIXELS_DOWNLOAD = 1048576
GEE_ERROR_PLACEHOLDER = "ImageCollection.getRegion: Too many values: "
__all__ = ('tile_coordinates', 'retrieve_max_pixel_count_from_pattern',
'cmp_coords', 'get_date_interval_array', 'make_polygon')
def make_polygon(top_left, bottom_right):
'''Given two (lon, lat) coordinates of both the top left and bottom right corner of a polygon, return the list of corner coordinates of this polygon
Parameters
----------
top_left : list of int or tuple of int
Top Left coordinates of the polygon
bottom_right : list of int or tuple of int
Bottom right coordinates of the polygon
Returns
-------
list
2-D list of the 5 coordinates need to create a Rectangular Polygon ``[top_left, top_right, bottom_right, bottom_left, top_left]``.
'''
return [
list(top_left),
[bottom_right[0], top_left[1]],
list(bottom_right),
[top_left[0], bottom_right[1]],
list(top_left)
]
def tile_coordinates(total_count_of_pixels, coordinates, max_gee=MAX_GEE_PIXELS_DOWNLOAD):
'''Given a coordinates array describing a Polygon, a count of pixes within that polygons, tiles this polygon into a grid a sub-Polygons where each sub-Polygon size matches the max_gee pixel count given as a parameter.
Parameters
----------
total_count_of_pixels : int
Total number of pixels of the designated area
coordinates : array of array of floats
Can be a 5-sized list of every coordinates defining the polygon ``[[long1, lat1],[long2, lat1]...,[long1, lat1]]`` or a 2-sized list of coordinates defining the top left and bottom right corner of the Polygon ``[[long1, lat1],[long2, lat2]]``
max_gee_threshold : int, optional
Total number of points allowed for one data query. Default: 1048576
Returns
-------
list
3-dimensional list of coordinates with pixel count inferior or equal to the maximum GEE threshold (shape: ``(number of images, number of coordinates per image, 2)``)
'''
assert(len(coordinates) == 2 or len(coordinates) == 5)
list_of_coordinates = []
if len(coordinates) == 2:
tmp_c = make_polygon(coordinates[0], coordinates[1])
else:
tmp_c = coordinates
if (total_count_of_pixels < max_gee):
return [tmp_c]
# The coordinate polygon will be tiled in `grid_length * grid_length` sub-Polygons
grid_length = int(math.ceil(math.sqrt(total_count_of_pixels/max_gee)))
original_polygon_width = tmp_c[1][0] - tmp_c[0][0]
original_polygon_height = tmp_c[3][1] - tmp_c[0][1]
for i in range(grid_length):
for j in range(grid_length):
list_of_coordinates.append(
make_polygon(
[tmp_c[0][0]+i*original_polygon_width/grid_length,
tmp_c[0][1]+j*original_polygon_height/grid_length],
[tmp_c[0][0]+(i+1)*original_polygon_width/grid_length,
tmp_c[0][1]+(j+1)*original_polygon_height/grid_length]
)
)
return list_of_coordinates
def retrieve_max_pixel_count_from_pattern(error_str):
'''Given an input getRegion error from GEE, extract the provided points count.
Parameters
----------
error_str : str
the str text of the GEE error (e.g. the function caled on ``"ImageCollection.getRegion: Too many values: x points ..."`` will output x)
Returns
-------
int
Returns the number of points specified in the input image
'''
try:
return int(error_str.split("ImageCollection.getRegion: Too many values: ")[1].split(" points")[0])
except:
raise ValueError("No max pixels value found")
def cmp_coords(a, b):
'''
Given two coordinates dict a and b, compare which one is closer to the North-Eastern direction
Parameters
----------
a : dict
dict with keys ``"lon"`` and ``"lat"``
b : dict
dict with keys ``"lon"`` and ``"lat"``
Returns
-------
int
**-1** if ``a > b``, **1** if ``a < b``, **0** if ``a == b``
'''
if a["lat"] != b["lat"]:
return 1 if a["lat"] < b["lat"] else -1
elif a["lon"] != b["lon"]:
return 1 if a["lon"] < b["lon"] else -1
else:
return 0
def define_image_shape(pixel_values):
"""Define image shape based on number pixel and latitude values
Parameters
----------
pixel_values :
Dictionnary with retrieved pixel values along with latitude and
longitude coordinates
Returns
-------
(with, height) :
A tuple with the width and height of the requested area of interest
"""
# count pixels with common latitude until it changes to know the image width
width = 1
while pixel_values[width]["lat"] == pixel_values[0]["lat"]:
width += 1
# deduce the image height from its width
height = len(pixel_values) // width
return (width, height)
def get_date_interval_array(start_date, end_date, day_timedelta=1):
'''Initialize a list of days interval of size ``day_timedelta`` iteratively created between ``start_date`` and ``end_date``.
Parameters
----------
start_date : datetime.datetime
first date time of the array
end_date : datetime.datetime
last date of the array
day_timedelta : int
size, in days, of every interval
'''
assert(start_date is not None and end_date is not None and day_timedelta is not None)
assert(start_date < end_date)
assert(type(day_timedelta) == int)
date_intervals = []
tmp_date = start_date
while tmp_date < end_date:
date_intervals.append((tmp_date.strftime(
"%Y-%m-%d"), (tmp_date + timedelta(days=day_timedelta)).strftime("%Y-%m-%d")))
tmp_date += timedelta(days=day_timedelta)
return date_intervals
def print_verbose(msg, verbose, expected_verbose):
if verbose == 0:
pass
elif verbose == 1 and expected_verbose >= 1:
print(msg)
elif verbose == 2 and expected_verbose >= 1:
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print(f"[INFO] t={current_time} | {msg}") | 0.840193 | 0.673903 |
import unittest
import pywintypes
import wellcad.com
from ._extra_asserts import ExtraAsserts
from ._sample_path import SamplePath
class TestEquipmentItem(unittest.TestCase, ExtraAsserts, SamplePath):
@classmethod
def setUpClass(cls):
cls.app = wellcad.com.Application()
cls.sample_path = cls._find_sample_path()
cls.borehole = cls.app.open_borehole(str(cls.sample_path / "Engineering Log and Borehole Volume.wcl"))
cls.engineering_log = cls.borehole.get_log("Well Sketch")
cls.eqp_item = cls.engineering_log.eqp_item(10)
cls.liquid_eqp_item = cls.engineering_log.eqp_item(16)
@classmethod
def tearDownClass(cls):
cls.app.quit(False)
def test_top_bottom(self):
top = self.eqp_item.top_depth # 50
bottom = self.eqp_item.bottom_depth # 60
self.assertIsInstance(top, float)
self.assertIsInstance(bottom, float)
self.assertGreaterEqual(bottom, top)
self.assertAttrAlmostChange(self.eqp_item, 'top_depth', top + 5, 3)
self.assertAttrAlmostChange(self.eqp_item, 'bottom_depth', bottom + 5, 3)
def test_swapped_top_bottom(self):
top = self.eqp_item.top_depth # 50
bottom = self.eqp_item.bottom_depth # 60
self.eqp_item.top_depth = bottom + 10
self.eqp_item.bottom_depth = top - 10
self.assertAlmostEqual(self.eqp_item.top_depth, top, 3) # no change because greater than bottom
self.assertAlmostEqual(self.eqp_item.bottom_depth, bottom, 3) # no change because smaller than top
def test_axis_position(self):
self.assertAlmostEqual(self.eqp_item.axis_position, 0.0, 3)
self.assertAttrAlmostChange(self.eqp_item, 'axis_position', 0.2, 3)
def test_diameter(self):
inner = self.eqp_item.internal_diameter # 90
outer = self.eqp_item.external_diameter # 100
self.assertIsInstance(inner, float)
self.assertIsInstance(outer, float)
self.assertGreaterEqual(outer, inner)
self.assertAttrAlmostChange(self.eqp_item, 'internal_diameter', inner + 5, 3)
self.assertAttrAlmostChange(self.eqp_item, 'external_diameter', outer + 5, 3)
def test_swapped_diameter(self):
inner = self.eqp_item.internal_diameter # 90
outer = self.eqp_item.external_diameter # 100
self.eqp_item.internal_diameter = outer + 10
self.eqp_item.external_diameter = inner - 10
self.assertAlmostEqual(self.eqp_item.internal_diameter, inner, 3) # no change because greater than outer
self.assertAlmostEqual(self.eqp_item.external_diameter, outer, 3) # no change because smaller than inner
def test_type(self):
self.assertAttrEqual(self.eqp_item, "type", 2)
def test_name(self):
self.assertAttrEqual(self.eqp_item, "name", "PlainCasing")
def test_description(self):
self.assertAttrEqual(self.eqp_item, "description", "Plain casing")
def test_comment(self):
self.assertAttrEqual(self.eqp_item, "comment", "")
self.assertAttrChange(self.eqp_item, "comment", "a fresh comment")
def test_axis_position_fail_on_non_solid(self):
with self.assertRaises(pywintypes.com_error):
self.liquid_eqp_item.axis_position
def test_int_diameter_fail_on_non_solid(self):
with self.assertRaises(pywintypes.com_error):
self.liquid_eqp_item.internal_diameter
def test_ext_diameter_fail_on_non_solid(self):
with self.assertRaises(pywintypes.com_error):
self.liquid_eqp_item.external_diameter
def test_set_injection_position(self):
self.assertAttrEqual(self.liquid_eqp_item, 'injection_position', 0.0)
self.assertAttrAlmostChange(self.liquid_eqp_item, 'injection_position', 0.5, 3)
def test_set_injection_depth(self):
self.assertAttrEqual(self.liquid_eqp_item, 'injection_depth', 0.0) # initial value is 0
self.assertAttrAlmostChange(self.liquid_eqp_item, 'injection_depth', 70.0, 3) # fails when trying to set it back to 0, sets it to 60 (bottom_depth) instead
def test_weight(self):
self.assertAttrEqual(self.eqp_item, 'weight', -1.0) # default value is -1 ?
self.assertAttrAlmostChange(self.eqp_item, 'weight', 0.5, 3)
def test_thickness(self):
self.assertAttrEqual(self.eqp_item, 'thickness', 5.0)
self.assertAttrAlmostChange(self.eqp_item, 'thickness', 7.0, 3)
def test_thickness_and_diameter(self):
self.assertAttrEqual(self.eqp_item, 'thickness', 5.0)
self.assertAttrEqual(self.eqp_item, 'external_diameter', 100.0)
self.assertAttrEqual(self.eqp_item, 'internal_diameter', 90.0)
self.eqp_item.thickness = 10.0
self.assertAttrEqual(self.eqp_item, 'thickness', 10.0)
self.assertAttrEqual(self.eqp_item, 'external_diameter', 100.0)
self.assertAttrEqual(self.eqp_item, 'internal_diameter', 80.0)
self.eqp_item.thickness = 5.0
self.assertAttrEqual(self.eqp_item, 'thickness', 5.0)
self.assertAttrEqual(self.eqp_item, 'external_diameter', 100.0)
self.assertAttrEqual(self.eqp_item, 'internal_diameter', 90.0)
def test_grade(self):
self.assertAttrEqual(self.eqp_item, 'grade', "")
self.assertAttrChange(self.eqp_item, 'grade', "good casing")
if __name__ == '__main__':
unittest.main() | test/test_equipment_item.py | 0.661376 | 0.596727 |
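A hedged note on running this suite: it needs a Windows host with WellCAD, pywin32 and the sample .wcl files installed, after which it can be driven programmatically:
import unittest
unittest.main(module='test.test_equipment_item', exit=False)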
from types import MappingProxyType
from typing import Optional, Iterable
from dataclasses import dataclass, field
from websockets.server import WebSocketServerProtocol
from ..types import ChannelId, SubscriptionId
@dataclass
class ClientState:
"""
ClientState holds information about subscriptions from a given client, used by the server for
bookkeeping. The `subscriptions` and `subscriptions_by_channel` are immutable, which makes them
safe to use in concurrent (async) code without worrying that they will be mutated during
iteration.
"""
connection: WebSocketServerProtocol
subscriptions: "MappingProxyType[SubscriptionId, ChannelId]" = field(
default_factory=lambda: MappingProxyType({})
)
subscriptions_by_channel: "MappingProxyType[ChannelId, Iterable[SubscriptionId]]" = field(
default_factory=lambda: MappingProxyType({})
)
def remove_channel(self, removed_chan_id: ChannelId):
subs = self.subscriptions_by_channel.get(removed_chan_id)
if subs is not None:
self.subscriptions = MappingProxyType(
{
sub: chan
for sub, chan in self.subscriptions.items()
if chan != removed_chan_id
}
)
self.subscriptions_by_channel = MappingProxyType(
{
chan: subs
for chan, subs in self.subscriptions_by_channel.items()
if chan != removed_chan_id
}
)
def add_subscription(self, sub_id: SubscriptionId, chan_id: ChannelId):
self.subscriptions = MappingProxyType({**self.subscriptions, sub_id: chan_id})
self.subscriptions_by_channel = MappingProxyType(
{
**self.subscriptions_by_channel,
chan_id: {*self.subscriptions_by_channel.get(chan_id, ()), sub_id},
}
)
def remove_subscription(
self, removed_sub_id: SubscriptionId
) -> Optional[ChannelId]:
chan_id = self.subscriptions.get(removed_sub_id)
if chan_id is None:
return None
self.subscriptions = MappingProxyType(
{
sub: chan
for sub, chan in self.subscriptions.items()
if sub != removed_sub_id
}
)
new_subscriptions_by_channel = {
chan: subs
for chan, subs in self.subscriptions_by_channel.items()
if chan != chan_id
}
new_subs = {
sub_id
for sub_id in self.subscriptions_by_channel.get(chan_id, ())
if sub_id != removed_sub_id
}
if new_subs:
new_subscriptions_by_channel[chan_id] = new_subs
self.subscriptions_by_channel = MappingProxyType(new_subscriptions_by_channel)
return chan_id | python/src/foxglove_websocket/server/client_state.py | 0.829146 | 0.107836 |
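A minimal sketch of the copy-on-write bookkeeping above (it assumes SubscriptionId and ChannelId are NewType wrappers over int, and passes None where a real WebSocketServerProtocol connection belongs):
state = ClientState(connection=None)  # None stands in for a live websocket connection
state.add_subscription(SubscriptionId(1), ChannelId(7))
state.add_subscription(SubscriptionId(2), ChannelId(7))
assert state.remove_subscription(SubscriptionId(1)) == ChannelId(7)
assert dict(state.subscriptions) == {SubscriptionId(2): ChannelId(7)}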
import discord
from discord.ext import commands, tasks
from discord.utils import get
from discord import FFmpegPCMAudio
import asyncio
import youtube_dl
import os
import math
from dotenv import load_dotenv
load_dotenv()
help_command = commands.DefaultHelpCommand(
no_category = 'Commands'
)
os.system('cls')
bot = commands.Bot(command_prefix='-',help_command = help_command)
password = str(os.<PASSWORD>("bot_key"))
playlist = []
queue = []
waiting = False
looping = False
loop_current = False
last_song = {}
try:
async def play_song(ctx,song=None):
global queue
global loop_current
global last_song
FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}
if song is None:
await ctx.send(f"**Skipped**")
if len(queue) >= 1:
song = queue.pop(0)
elif len(queue) == 0:
last_song = {}
if not looping:
await ctx.send("**Queue is now empty**",delete_after=20)
return
elif song == "same_song":
song = last_song
source = await discord.FFmpegOpusAudio.from_probe(song['url'],**FFMPEG_OPTIONS)
converted_duration = await convert_duration(song['duration'])
if ctx.voice_client.is_playing() == True:
queue.append(song)
length = len(queue)
await ctx.send(content=f"Added to **#{length}** in queue: {song['title']} ({converted_duration})")
elif ctx.voice_client.is_playing() == False:
last_song = song
ctx.voice_client.play(source,after=lambda ex: ctx.bot.loop.create_task(check_queue(ctx)))
await ctx.send(content=f"**Now Playing:** {song['title']} ({converted_duration})")
async def check_queue(ctx):
global loop_current
if len(queue) >= 1:
loop_current = False
next_song = queue.pop(0)
if looping:
queue.append(last_song)
await play_song(ctx,next_song)
else:
if looping:
loop_current= True
await play_song(ctx,"same_song")
else:
loop_current=False
async def convert_duration(duration):
minutes = str(math.floor(duration/60))
seconds = str(duration%60)
if len(seconds) == 1:
seconds = "0" + seconds # zero-pad single-digit seconds, e.g. 2:5 -> 2:05
return f"{minutes}:{seconds}"
@bot.event
async def on_ready():
print(f'{bot.user} logged in')
@bot.command(help = 'Loops the queue playlist',aliases=['r','R','Repeat'])
async def repeat(ctx):
global looping
global queue
if not looping:
looping = True
if len(queue) == 1:
await ctx.send(content=f"**Now Repeating:** 1 song")
else:
await ctx.send(content=f"**Now Repeating:** {len(queue)+1} songs")
elif looping:
looping = False
await ctx.send(content="**No Longer Repeating**",delete_after=20)
@bot.command(help='Displays the current queue, as well as the currently playing song',aliases=['q','Q','Queue','queue'])
async def displayqueue(ctx):
i=0
if last_song != {}:
converted_duration = await convert_duration(last_song['duration'])
message = await ctx.send(f'**Now Playing:** {last_song["title"]} ({converted_duration})')
else:
message = await ctx.send(f'**No song currently playing**')
if queue != []:
await message.edit(content=f'{message.content} \n**Current Queue:**')
for song in queue:
i += 1
converted_duration = await convert_duration(song['duration'])
await message.edit(content=f'{message.content}\n**{i}:** {song["title"]} ({converted_duration})')
elif queue == []:
await ctx.send('**Queue is empty!**')
@bot.command(help='Skips the current song',aliases=['s','S','Skip'])
async def skip(ctx):
ctx.voice_client.stop()
converted_duration = await convert_duration(last_song['duration'])
await ctx.send(f"**Skipped:** {last_song['title']} ({converted_duration})")
@bot.command(help = 'Shuts down the bot. Can only be used by Rybot')
async def shutdown(ctx):
messager = ctx.message.author
if messager.id == 404491098946273280:
await ctx.send(content=f"Ok {messager.name}, shutting down the bot",delete_after=20)
exit()
else:
await ctx.send(content=f"Sorry {messager.name},only Rybot can shut down this bot!",delete_after=20)
@bot.command(pass_content=True,aliases=['c','C','Clear'], help = 'Cancels current track and clears the queue')
async def clear(ctx):
global looping
global waiting
global loop_current
global last_song
waiting = False
looping = False
queue.clear()
loop_current = False
last_song = {}
ctx.voice_client.stop()
@bot.command(pass_content=True,aliases=['p'], help = 'Plays music from YouTube. Can take a url or a song title. Also used to choose search result when specified')
async def play(ctx, *,query=None):
global playlist
global waiting
if query is None:
await ctx.send(content=f"{ctx.message.author.name}: You must enter a song name or url",delete_after=20)
else:
joined = await join(ctx)
if joined:
if waiting and (query == "1" or query =="2" or query=="3" or query == "4" or query == "5"):
selected_song = int(query)-1
waiting = False
song = {
'url': playlist[selected_song]['url'],
'title': playlist[selected_song]['title'],
'duration': playlist[selected_song]['duration'],
}
await play_song(ctx,song)
else:
message = await ctx.send(content=":hourglass: searching")
YDL_OPTIONS = {
'format': 'bestaudio/best',
'default_search': 'ytsearch5',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '196',
}],
}
with youtube_dl.YoutubeDL(YDL_OPTIONS) as ydl:
info = ydl.extract_info(query, download=False)
if 'entries' not in info:
await ctx.message.delete()
song = {
'url': info['formats'][0]['url'],
'title': info['title'],
'duration': info['duration'],
}
await message.delete()
await play_song(ctx,song)
else:
if len(info['entries']) == 0:
await ctx.send(content="Couldn't find anything matching this search, please try a more general query",delete_after=20)
elif len(info['entries']) <= 5:
waiting = True
i = 0
await message.edit(content = "**Please select a track to play with ``-p 1-5`` command:**")
playlist = []
for entry in info["entries"]:
playlist.append({
'url': entry['formats'][0]['url'],
'title': entry['title'],
'duration': entry['duration']
})
converted_duration = await convert_duration(entry['duration'])
await message.edit(content = f"{message.content} \n**{i+1}**: {playlist[i]['title']} ({converted_duration})")
i += 1
elif len(info['entries']) > 5:
await ctx.send("**Detected playlist... adding to queue**")
for entry in info['entries']:
song = {
'url': entry['formats'][0]['url'],
'title': entry['title'],
'duration': entry['duration'],
}
await play_song(ctx,song)
@bot.command(pass_content=True,aliases=['t','T','Toggle'], help = 'Toggles on or off the current music playing ')
async def toggle(ctx):
vc = ctx.voice_client
if vc.is_paused():
vc.resume()
await ctx.send(content="**Resuming**",delete_after=20)
elif vc.is_playing():
vc.pause()
await ctx.send(content="**Paused**",delete_after=20)
@bot.command(pass_content=True,aliases=['j','J','Join'], help = "Makes the bot join your current voice channel. It won't play music")
async def join(ctx):
if not ctx.message.author.voice:
await ctx.send(content=f"{ctx.message.author.name}: You must join a voice channel first",delete_after=20)
return False
else:
channel = ctx.message.author.voice.channel
bot_voice = ctx.voice_client
if bot_voice:
await bot_voice.move_to(channel)
return True
else:
await channel.connect()
return True
@bot.command(pass_content=True,aliases=['l','L','Leave'], help = 'Disconnects the bot from the current voice channel')
async def leave(ctx):
for server_voice_client in ctx.bot.voice_clients:
if server_voice_client.guild == ctx.message.guild:
await server_voice_client.disconnect()
@bot.command(help = 'Displays credits')
async def credits(ctx):
await ctx.send("Created by <NAME>")
except Exception as e:
print(e)
bot.run(password) | bot.py | 0.16248 | 0.078078 |
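A quick hedged check of the duration formatter above (convert_duration is declared async, so it has to be awaited or driven through asyncio):
import asyncio
print(asyncio.run(convert_duration(125)))  # '2:05'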
import tensorflow as tf
import numpy as np
from sklearn.manifold import TSNE
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
from segmentpy.tf114.util import check_N_mkdir
def tsne_on_activation(embedded_tensor, labels, figsize=(45, 45), zoom=1, suffix='step0'):
"""
inputs:
-------
embedded_tensor: (numpy ndarray)
labels: (numpy ndarray?)
figsize: (tuple of int)
zoom: (int)
suffix: (str)
return:
-------
None
"""
assert embedded_tensor.shape[0] >= len(labels), 'You should have at least as many embeddings as labels'
fig, ax = plt.subplots(figsize=figsize)
artists = []
for xy, i in zip(embedded_tensor, labels):
x, y = xy
img = OffsetImage(i, zoom=zoom)
ab = AnnotationBbox(img, (x, y), xycoords='data', frameon=False)
artists.append(ax.add_artist(ab))
ax.update_datalim(embedded_tensor)
ax.autoscale()
plt.savefig(
'./dummy/tsne_act_{}.png'.format(suffix), #fixme: change here
dpi=45 # 45 in x 45 dpi gives an image roughly 2048 pixels wide
)
def compare_tsne_2D(embedded_tensor, labels, grps, which, figsize=(90, 90), rlt_dir=None, preffix='Weights', fst=0, sec=0):
"""
inputs:
-------
embedded_tensor: (numpy ndarray)
labels: (numpy ndarray?)
grps: (pandas column)
figsize: (tuple of int)
suffix: (str)
return:
-------
None
"""
assert rlt_dir is not None, "enter a rlt_dir"
assert embedded_tensor.shape[0] >= len(labels), 'You should have at least as many embeddings as labels'
df = pd.DataFrame(zip(embedded_tensor[:, 0], embedded_tensor[:, 1], labels, grps, which))
df.columns = ['coordX', 'coordY', 'labels', 'layers', 'which']
df_init = df.loc[df['which'] == 0].copy() # .copy() so the 'colors' assignments below avoid SettingWithCopyWarning
df_evolv = df.loc[df['which'] == 1].copy()
# convert column groups to categories int
df_init['colors'] = pd.Categorical(df_init['layers']).codes
df_evolv['colors'] = pd.Categorical(df_evolv['layers']).codes
df['colors'] = pd.Categorical(df['layers']).codes
# 2D scatter plots
fig1, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=figsize)
scat1 = ax1.scatter(df_init['coordX'], df_init['coordY'], c=df_init['colors'], cmap='coolwarm', alpha=0.5)
scat2 = ax2.scatter(df_evolv['coordX'], df_evolv['coordY'], c=df_evolv['colors'], cmap='coolwarm', alpha=0.5)
scat3 = ax3.scatter(df_init['coordX'], df_init['coordY'], c='black')
ax3.scatter(df_evolv['coordX'], df_evolv['coordY'], c=df_evolv['colors'], cmap='coolwarm', alpha=0.5)
scat4 = ax4.quiver(
np.asarray(df_init['coordX']),
np.asarray(df_init['coordY']),
np.asarray(df_evolv['coordX']) - np.asarray(df_init['coordX']),
np.asarray(df_evolv['coordY']) - np.asarray(df_init['coordY']),
scale_units='xy', angles='xy', scale=1,
)
scat4 = ax4.scatter(df_init['coordX'], df_init['coordY'], c='black')
ax4.scatter(df_evolv['coordX'], df_evolv['coordY'], c=df_evolv['colors'], cmap='coolwarm', alpha=0.5)
# set titles
ax1.set_title('Init weights')
ax2.set_title('Evolved weights')
ax3.set_title('Compare weights')
ax4.set_title('Trajectory')
# set legends
leg1 = ax1.legend(scat1.legend_elements()[0], df_init['layers'].unique(), title='Init Layers') #note: unique() might change order
ax1.add_artist(leg1)
leg2 = ax2.legend(scat2.legend_elements()[0], df_evolv['layers'].unique(), title='Evolved Layers') #note: unique() might change order
ax2.add_artist(leg2)
# leg3 = ax3.legend(scat3.legend_elements()[0], df['which'].unique(), title='Init vs Evolve') #note: unique() might change order
# ax3.add_artist(leg3)
# ax3.legend(loc='center left', bbox_to_anchor=(1.04, 0.5))
check_N_mkdir(rlt_dir)
plt.savefig(rlt_dir + '{}_2D_plot_step{}_vs_step{}_trajectory.png'.format(preffix, fst, sec))
pd.DataFrame(df_init).to_csv(rlt_dir + '{}_2D_plot_step{}.csv'.format(preffix, fst))
pd.DataFrame(df_evolv).to_csv(rlt_dir + '{}_2D_plot_step{}.csv'.format(preffix, sec))
plt.show()
def compare_tsne_3D(embedded_tensor, labels, grps, which, figsize=(90, 90), rlt_dir=None, suffix=0):
"""
inputs:
-------
embedded_tensor: (numpy ndarray)
labels: (numpy ndarray?)
grps: (pandas column)
figsize: (tuple of int)
suffix: (str)
return:
-------
None
"""
assert rlt_dir is not None, "enter a rlt_dir"
assert embedded_tensor.shape[0] >= len(labels), 'You should have at least as many embeddings as labels'
# group data with pandas
df = pd.DataFrame(zip(embedded_tensor[:, 0], embedded_tensor[:, 1], embedded_tensor[:, 2], labels, grps, which))
df.columns = ['coordX', 'coordY', 'coordZ', 'labels', 'layers', 'which']
df_init = df.loc[df['which'] == 0].copy()
df_evolv = df.loc[df['which'] == 1].copy()
# convert column groups to category ints
df_init['colors'] = pd.Categorical(df_init['layers']).codes
df_evolv['colors'] = pd.Categorical(df_evolv['layers']).codes
df['colors'] = pd.Categorical(df['layers']).codes
# plots conv
fig = plt.figure(figsize=figsize)
ax = Axes3D(fig)
ax.set_title('Weights from encoder')
ax.scatter(df_init['coordX'], df_init['coordY'], df_init['coordZ'], c=df_init['colors'], cmap=plt.get_cmap('Spectral'), marker='o')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
check_N_mkdir(rlt_dir)
# plt.savefig(rlt_dir + 'conv_weights_3Dplot_step{}.png'.format(suffix))
# plots deconv
fig2 = plt.figure(figsize=figsize)
ax2 = Axes3D(fig2)
ax2.set_title('Weights from decoder')
ax2.scatter(df_evolv['coordX'], df_evolv['coordY'], df_evolv['coordZ'], c=df_evolv['colors'], cmap=plt.get_cmap('Spectral'), marker='o')
ax2.set_xlabel('X')
ax2.set_ylabel('Y')
ax2.set_zlabel('Z')
# plots conv
fig3 = plt.figure(figsize=figsize)
ax3 = Axes3D(fig3)
ax3.set_title('Weights from all layers')
ax3.scatter(df_init['coordX'], df_init['coordY'], df_init['coordZ'], c=df_init['colors'], cmap=plt.get_cmap('Spectral'), marker='o')
ax3.scatter(df_evolv['coordX'], df_evolv['coordY'], df_evolv['coordZ'], c=df_evolv['colors'], cmap=plt.get_cmap('Spectral'), marker='o')
ax3.set_xlabel('X')
ax3.set_ylabel('Y')
ax3.set_zlabel('Z')
plt.show()
def tsne(tensor, perplexity=6000, niter=5000, mode='2D'):
"""
inputs:
-------
tensor: (numpy ndarray)
perplexity: (int)
niter: (int)
mode: (str)
return:
-------
res: (numpy ndarray) reduced n-dimensions array
"""
if mode == '2D':
t_sne = TSNE(perplexity=perplexity, n_components=2, init='random', n_iter=niter)
res = t_sne.fit_transform(tensor)
elif mode == '3D':
t_sne = TSNE(perplexity=perplexity, n_components=3, init='random', n_iter=niter)
res = t_sne.fit_transform(tensor)
else:
raise ValueError("Please choose a mode among '2D' and '3D'!")
return res | src/segmentpy/tf114/tsne.py | 0.769124 | 0.637905 |
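A hedged usage sketch of the tsne() helper above; note that scikit-learn requires perplexity < n_samples, so the large default of 6000 only suits big weight matrices:
import numpy as np
emb = tsne(np.random.rand(50, 10), perplexity=10, niter=1000, mode='2D')
print(emb.shape)  # (50, 2)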
import os
import sys # salir() below calls sys.exit()
from login import *
list_categorias = []
def categorias ():
os.system('cls')
print(":::: MENU CATEGORIAS ::::")
print("[1.] INGRESAR NUEVA CATEGORIA")
print("[2.] LISTAR CATEGORIAS")
print("[3.] BUSCAR UNA CATEGORIA")
print("[4.] MODIFICAR CATEGORIA")
print("[5.] ELIMINAR CATEGORIA")
print("[6.] VOLVER AL MENU")
print("[7.] SALIR")
op = input(".:: DIGITE UNA OPCION: ")
if op == '1' :
ingresar_categoria()
elif op == '2' :
listar_categoria()
elif op == '3' :
buscar_categoria()
elif op == '4' :
modificar_categoria()
elif op == '5' :
eliminar_categoria()
elif op == '6':
categorias()
elif op == '7':
salir()
def ingresar_categoria():
os.system('cls')
print("::: INGRESO NUEVA CATEGORIA ::: ")
nomcategoria = (input(" DIGITE CATEGORIA A ALMACENAR: "))
list_categorias.append(nomcategoria)
key = input(" LA CATEGORIA HA SIDO ALMACENADA CON EXITO !. Presione cualquier tecla para volver al menú")
categorias()
def listar_categoria():
os.system('cls')
print("::: LISTADO DE CATEGORIAS ::: ")
if len(list_categorias) == 0 :
print("::: LA LISTA ESTÁ VACÍA :::")
else :
print(list_categorias)
key = input("Presione cualquier tecla para volver al menú")
categorias()
def buscar_categoria():
os.system('cls')
buscar = 0
print("::: BÚSQUEDA DE CATEGORIAS ::: ")
# Search the list for the requested category
if len(list_categorias) == 0 :
print("::: LA LISTA ESTÁ VACÍA :::")
else :
buscar = input("DIGITE CATEGORIA A BUSCAR: ")
i = 0
encontrado = False
while i < len(list_categorias):
if buscar == list_categorias[i]:
encontrado = True
break
i += 1
if encontrado == True :
print("::: LA CATEGORIA FUE ENCONTRADA EN LA LISTA")
else :
print("::: LA CATEGORIA NO FUE ENCONTRADA EN LA LISTA")
key = input("Presione cualquier tecla para volver al menú")
categorias()
def modificar_categoria():
os.system('cls')
print("::: MODIFICACIÓN DE CATEGORIA ::: ")
if len(list_categorias) == 0 :
print("::: LA LISTA ESTÁ VACÍA :::")
else :
buscar = input("DIGITE CATEGORIA A MODIFICAR: ")
i = 0
encontrado = False
while i < len(list_categorias):
if buscar == list_categorias[i]:
encontrado = True
break
i += 1
if encontrado == True:
modificar = input(" DIGITE NUEVA CATEGORIA: ")
list_categorias[i] = modificar # replace the found category in place
else :
print("::: LA CATEGORIA NO FUE ENCONTRADA EN LA LISTA")
key = input("Presione cualquier tecla para volver al menú")
categorias()
def eliminar_categoria():
os.system('cls')
print("::::::: ELIMINAR CATEGORIA :::::::")
elim = input("DIGITE CATEGORIA A ELIMINAR")
list_categorias.remove(elim)
key = input(" Persione cualquier tecla para volver al menu")
categorias()
def salir ():
print("Hasta Luego, vuelve pronto, eres importante, cuidate")
sys.exit() | Final/categorias.py | 0.053949 | 0.194119 |
from __future__ import print_function
import argparse
import os
import sys
import numpy as np
import pandas as pd
def process_header_lines(header_lines):
h = {}
for l in header_lines:
split = l.strip('#').strip("\n").split("=", 1)
try:
if split[0] not in h:
h[split[0]] = [split[1]]
else:
h[split[0]] += [split[1]]
except IndexError: # a header line without '=' has no value part
pass
return h
def process_info_fields(info_str):
split = info_str.strip("<>\"").split(': ')[1].split("|")
info_map = dict(zip(split, list(range(0, len(split)))))
return info_map
def extract_records(info_str, canonical=False):
records = info_str.split(",")
records = [r.split("|") for r in records]
if canonical:
records = [r for r in records if r[24] == 'YES']
return records
def process_mutation_scores(records, info_fields_map):
score_map = {
'stSNV': {
"SIFT": 0,
"PPH2": 1,
"MA": 3.5
},
'fsindel': {
"SIFT": 0,
"PPH2": 1,
"MA": 3.5
},
'sSNV': {
"SIFT": 1,
"PPH2": 0,
"MA": -2
}
}
consequence_map = {
"missense_variant": "nsSNV",
"synonymous_variant": "sSNV",
"frameshift_variant": "fsindel",
"stop_gained": "stSNV"
}
gene = []
sift = []
pph = []
ma = []
for r in records:
if r[info_fields_map['Consequence']] in consequence_map:
gene.append(r[info_fields_map['Gene']])
if r[info_fields_map['Consequence']] == "missense_variant":
sift.extend(
[eval_else(v, float) for v in r[info_fields_map['SIFT_score']].split("&")]
)
pph.extend(
[eval_else(v, float) for v in r[info_fields_map['Polyphen2_HDIV_score']].split("&")]
)
ma.extend(
[eval_else(v, float) for v in r[info_fields_map['MutationAssessor_score']].split("&")]
)
else:
sift.append(
score_map[consequence_map[r[info_fields_map['Consequence']]]]['SIFT']
)
pph.append(
score_map[consequence_map[r[info_fields_map['Consequence']]]]['PPH2']
)
ma.append(
score_map[consequence_map[r[info_fields_map['Consequence']]]]['MA']
)
else:
break
if len(gene) >= 1:
if all(x == gene[0] for x in gene):
gene = gene[0]
else:
print("[WARNING] multiple gene ids are associated with this variant:", records, file=sys.stderr)
gene = max(set(gene), key=gene.count)
else:
gene = None
return [gene, eval_else(sift, min), eval_else(pph, max), eval_else(ma, max)]
def eval_else(v, func, ret=np.nan):
try:
return func(v)
except Exception: # fall back to `ret` on any failure, e.g. unparsable scores
return ret
# vcfFile = "/Users/strucka/Projects/dockerized_tools/vep/cromwell-executions/test/6606e7a3-39dc-4556-a86a-5a603e777cd0/call-variant_effect_predictor/vep_output.vcf"
def main(vcf_file, output_file):
# read files
header_lines = os.popen('head -5000 ' + vcf_file).readlines()
header_lines = [l for l in header_lines if l.startswith('#')]
vcf_header = process_header_lines(header_lines)
info_fields = process_info_fields(vcf_header['INFO'][0])
vcf = pd.read_table(vcf_file, header=len(header_lines)-1, na_values="./.:.:.", low_memory=False)
# extract sift, pph2 and ma scores
info_df = vcf['INFO'].apply(lambda x: extract_records(x, canonical=False)).apply(lambda x: process_mutation_scores(x, info_fields)).apply(pd.Series)
info_df.columns = ['Gene', 'SIFT_score', 'Polyphen2_HDIV_score', 'MutationAssessor_score']
# merge with vcf
min_vcf = vcf.drop(["#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "FORMAT"], axis=1, inplace=False)
expanded_vcf = pd.concat(
[
min_vcf.drop(["INFO"], axis=1, inplace=False),
info_df
],
axis=1
)
# reshape
melted = pd.melt(expanded_vcf,
id_vars=['Gene',
'SIFT_score',
'Polyphen2_HDIV_score',
'MutationAssessor_score'],
var_name="sample",
value_name="mutation_status")
# drop na, reorder columns and rename columns
filtered = melted.dropna()
filtered = filtered[filtered['sample'] != "NORMAL"]
reordered = filtered[["sample", "Gene", "SIFT_score", "Polyphen2_HDIV_score", "MutationAssessor_score"]]
reordered.columns = ["SAMPLE", "GENE", "SIFT", "PPH2", "MA"]
# write to output file
reordered.to_csv(output_file, sep="\t", header=True, index=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("vcf_file",
type=str,
help="VEP annotated VCF file with cols: Gene, SIFT_score, Polyphen2_HDIV_score, MutationAssessor_score")
parser.add_argument("-o",
dest="output_file",
type=str,
default="oncodrivefm_input.tdm",
help="output file")
args = parser.parse_args()
main(args.vcf_file, args.output_file) | full_pipeline/oncodrivefm_input_file_generator.py | 0.368065 | 0.275118 |
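eval_else in isolation, a minimal sketch of the fallback behaviour the scoring code above relies on:
print(eval_else("0.42", float))    # 0.42
print(eval_else(".", float))       # nan -- float('.') raises ValueError
print(eval_else([1.0, 0.3], min))  # 0.3
print(eval_else([], max))          # nan -- max() of an empty sequence raises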
class CoalaIpError(Exception):
"""Base class for all Coala IP errors."""
class IncompatiblePluginError(CoalaIpError, ValueError):
"""Raised when entities with incompatible plugins are used together.
Should contain a list of the incompatible plugins as the first
argument.
"""
@property
def incompatible_plugins(self):
""":obj:`list` of :class:`~coalaip.plugin.AbstractPlugin`:
Incompatible plugins
"""
return self.args[0]
class ModelError(CoalaIpError):
"""Base class for all model errors."""
class ModelDataError(ModelError, ValueError):
"""Raised if there is an error with the model's data."""
class ModelNotYetLoadedError(ModelError):
"""Raised if the lazily loaded model has not been loaded from the
backing persistence layer yet."""
class PersistenceError(CoalaIpError):
"""Base class for all persistence-related errors.
Attributes:
message (str): Message of the error
error (:exc:`Exception`): Original exception, if available
"""
def __init__(self, message='', error=None):
self.message = message
self.error = error
def __str__(self):
return self.message
class EntityCreationError(PersistenceError):
"""Raised if an error occured during the creation of an entity on the
backing persistence layer.
Should contain the original error that caused the failure, if
available.
"""
class EntityNotFoundError(PersistenceError):
"""Raised if the entity could not be found on the backing persistence
layer
"""
class EntityNotYetPersistedError(PersistenceError):
"""Raised when an action requiring an entity to be available on the
persistence layer is attempted on an entity that has not been
persisted yet.
"""
class EntityPreviouslyCreatedError(PersistenceError):
"""Raised when attempting to persist an already persisted entity.
Should contain the existing id of the entity.
Attributes:
existing_id (str): Currently existing id of the entity on the
persistence layer
See :exc:`.PersistenceError` for other attributes.
"""
def __init__(self, existing_id, *args, **kwargs):
self.existing_id = existing_id
super().__init__(*args, **kwargs)
class EntityTransferError(PersistenceError):
"""Raised if an error occured during the transfer of an entity on the
backing persistence layer.
Should contain the original error that caused the failure, if
available.
""" | coalaip/exceptions.py | class CoalaIpError(Exception):
"""Base class for all Coala IP errors."""
class IncompatiblePluginError(CoalaIpError, ValueError):
"""Raised when entities with incompatible plugins are used together.
Should contain a list of the incompatible plugins as the first
argument.
"""
@property
def incompatible_plugins(self):
""":obj:`list` of :class:`~coalaip.plugin.AbstractPlugin`:
Incompatible plugins
"""
return self.args[0]
class ModelError(CoalaIpError):
"""Base class for all model errors."""
class ModelDataError(ModelError, ValueError):
"""Raised if there is an error with the model's data."""
class ModelNotYetLoadedError(ModelError):
"""Raised if the lazily loaded model has not been loaded from the
backing persistence layer yet."""
class PersistenceError(CoalaIpError):
"""Base class for all persistence-related errors.
Attributes:
message (str): Message of the error
error (:exc:`Exception`): Original exception, if available
"""
def __init__(self, message='', error=None):
self.message = message
self.error = error
def __str__(self):
return self.message
class EntityCreationError(PersistenceError):
"""Raised if an error occured during the creation of an entity on the
backing persistence layer.
Should contain the original error that caused the failure, if
available.
"""
class EntityNotFoundError(PersistenceError):
"""Raised if the entity could not be found on the backing persistence
layer
"""
class EntityNotYetPersistedError(PersistenceError):
"""Raised when an action requiring an entity to be available on the
persistence layer is attempted on an entity that has not been
persisted yet.
"""
class EntityPreviouslyCreatedError(PersistenceError):
"""Raised when attempting to persist an already persisted entity.
Should contain the existing id of the entity.
Attributes:
existing_id (str): Currently existing id of the entity on the
persistence layer
See :exc:`.PersistenceError` for other attributes.
"""
def __init__(self, existing_id, *args, **kwargs):
self.existing_id = existing_id
super().__init__(*args, **kwargs)
class EntityTransferError(PersistenceError):
"""Raised if an error occured during the transfer of an entity on the
backing persistence layer.
Should contain the original error that caused the failure, if
available.
""" | 0.931252 | 0.356167 |
import numpy as np
import paddle
from paddle.autograd.functional import vjp as _vjp
def ternary(cond, x, y):
expanding_dim = x.dim() - cond.dim()
assert expanding_dim >= 0
for _ in range(expanding_dim):
cond = cond.unsqueeze(-1)
if cond.shape != x.shape:
cond = cond.broadcast_to(x.shape)
return paddle.where(cond, x, y)
def vjp(f, x, v=None, create_graph=False):
r"""A single tensor version of VJP.
Args:
f (Callable): the objective function.
        x (Tensor): the input tensor.
        v (Tensor, optional): the cotangent vector for the VJP. Default is
            None.
        create_graph (bool, optional): whether to build the graph needed for
            higher-order differentiation. Default is False.
Returns:
(fval, gval):
fval: a tensor that holds the function value.
gval: a tensor that holds the function gradients.
"""
assert isinstance(x, paddle.Tensor), (
        f'This BFGS optimizer applies to a function of a single input tensor. '
f'The input however is a {type(x)}.'
)
fval, gval = _vjp(f, x, v=v, create_graph=create_graph)
assert isinstance(fval, paddle.Tensor), (
f'This BFGS optimizer only supports function returning a single output '
f'tensor. However, the function result is a {type(fval)}.'
)
return fval, gval[0]
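# Minimal usage sketch for vjp() above (kept as a comment; assumes a Paddle
# build that provides paddle.autograd.functional.vjp with this signature):
#   x = paddle.to_tensor([1.0, 2.0, 3.0])
#   fval, gval = vjp(lambda t: paddle.sum(t * t), x)
#   # fval -> 14.0, gval -> [2.0, 4.0, 6.0]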
def vnorm_p(x, p=2):
r"""p vector norm."""
return paddle.norm(x, p=p, axis=-1)
def vnorm_inf(x):
r"""Infinum vector norm."""
return paddle.norm(x, p=np.inf, axis=-1)
def matnorm(x):
r"""Matrix norm."""
return paddle.norm(x, 'fro')
def any_active(state):
return paddle.any(state == 0)
def all_active_with_predicates(state, *predicates):
r"""Tests whether all active states also satisfies the predicates.
Args:
state (Tensor): the search state of dtype int. For each element, 0
represents active state.
predicates (List[Tensor]): a list of boolean typed tensors of the
same shape with `state`.
Returns:
A scalar boolean tensor. True if the predicates are true for every
active state. Otherwise False.
"""
active_preds = active = active_state(state)
for p in predicates:
active_preds = paddle.logical_and(active_preds, p)
return paddle.all(active == active_preds)
def any_active_with_predicates(state, *predicates):
r"""Tests whether there's any active state also satisfies all the
predicates.
Args:
state (Tensor): the search state of dtype int. For each element, 0
represents active state.
predicates (List[Tensor]): a list of boolean typed tensors of the
same shape with `state`.
Returns:
A scalar boolean tensor. True if any element in `state` is active and
the corresponding predicate values are all True. Otherwise False.
"""
active_preds = active_state(state)
for p in predicates:
active_preds = paddle.logical_and(active_preds, p)
return paddle.any(active_preds)
def active_state(state):
return state == 0
def converged_state(state):
return state == 1
def failed_state(state):
return state == 2
def make_const(tensor_like, value, dtype=None):
r"""Makes a tensor filled with specified constant value.
Args:
tensor_like (Tensor): uses this tensor's shape and dtype to build
the output tensor.
value (float|boolean|int): fills the output tensor with this value.
        dtype (Optional): the output tensor's dtype. Default is
None, in which case the output uses `tensor_like`'s dtype.
Returns:
The generated tensor with constant value `value` with the desired
dtype.
"""
if dtype is None:
dtype = tensor_like.dtype
return paddle.to_tensor(value, dtype).broadcast_to(tensor_like.shape)
def make_state(tensor_like, value='active'):
r"""Makes BFGS state tensor. Default is all zeros.
    Args:
tensor_like (Tensor): provides the shape of the result tensor.
value (str, optional): indicates the default value of the result
tensor. If `value` is 'active' then the result tensor is all
zeros. If `value` is 'converged' then the result tensor is all
ones. If `value` is 'failed' then the result tensor is all twos.
Default value is 'active'.
Returns:
        Tensor with the same shape as `tensor_like`, of dtype `int`.
"""
    # (FIXME) int8 would be a better choice, but it's not consistently
    # supported across Paddle APIs, so int32 is used instead.
    if value == 'active':
        state = paddle.zeros_like(tensor_like, dtype='int32')
    elif value == 'converged':
        state = paddle.ones_like(tensor_like, dtype='int32')
    else:
        assert value == 'failed'
        state = paddle.ones_like(tensor_like, dtype='int32') + 1
return state
def update_state(input_state, predicate, new_state):
r"""Updates the state on the locations where the old value is 0 and
corresponding predicate is True.
Args:
input_state (Tensor): the original state tensor.
predicate (Tensor): a tensor with the same shape of `input_state`, of
boolean type, indicating which locations should be updated.
new_state ('failed' | 'converged'): specifies the new state, either
'converged' or 'failed'.
Returns:
Tensor updated on the specified locations.
"""
assert new_state in ('converged', 'failed')
    if new_state == 'converged':
increments = paddle.to_tensor(predicate, dtype='int32')
else:
increments = paddle.to_tensor(predicate, dtype='int32') * 2
output_state = paddle.where(input_state == 0, increments, input_state)
return output_state
def as_float_tensor(input, dtype=None):
r"""Generates a float or double typed tensor from `input`. The data
type of `input` is either float or double.
Args:
input(Scalar | List | Tensor): a scalar or a list of floats, or
a tensor with dtype float or double.
dtype('float' | 'float32' | 'float64' | 'double', Optional): the data
type of the result tensor. The default value is None.
Returns:
A tensor with the required dtype.
"""
assert isinstance(input, (int, float, list, paddle.Tensor)), (
        f'Input `{input}` should be int, float, list or paddle.Tensor but found '
f'{type(input)}.'
)
if dtype in ('float', 'float32', paddle.float32):
dtype = 'float32'
elif dtype in ['float64', 'double', paddle.float64]:
dtype = 'float64'
else:
assert dtype is None
try:
output = paddle.to_tensor(input, dtype=dtype)
if output.dtype not in (paddle.float32, paddle.float64):
raise TypeError
except TypeError:
raise TypeError(f'The data type of {input} is {type(input)}, which is not supported.')
return output
class StopCounterException(Exception):
r"""raises this Exception on the event that increments a stopped
StopCounter.
"""
pass
class StopCounter(object):
r"""Defines a counter with a predefined end count.
"""
def __init__(self, end):
self.count = 0
assert isinstance(end, int) and end > 0
self.end = end
def increment(self):
r"""Increments the counter."""
if self.count < self.end:
self.count += 1
else:
raise StopCounterException() | python/paddle/incubate/optimizer/functional/bfgs_utils.py |
import numpy as np
import paddle
from paddle.autograd.functional import vjp as _vjp
def ternary(cond, x, y):
expanding_dim = x.dim() - cond.dim()
assert expanding_dim >= 0
for _ in range(expanding_dim):
cond = cond.unsqueeze(-1)
if cond.shape != x.shape:
cond = cond.broadcast_to(x.shape)
return paddle.where(cond, x, y)
def vjp(f, x, v=None, create_graph=False):
r"""A single tensor version of VJP.
Args:
f (Callable): the objective function.
        x (Tensor): the input tensor.
        v (Tensor, optional): the cotangent vector for the VJP. Default is
            None.
        create_graph (bool, optional): whether to build the graph needed for
            higher-order differentiation. Default is False.
Returns:
(fval, gval):
fval: a tensor that holds the function value.
gval: a tensor that holds the function gradients.
"""
assert isinstance(x, paddle.Tensor), (
        f'This BFGS optimizer applies to a function of a single input tensor. '
f'The input however is a {type(x)}.'
)
fval, gval = _vjp(f, x, v=v, create_graph=create_graph)
assert isinstance(fval, paddle.Tensor), (
f'This BFGS optimizer only supports function returning a single output '
f'tensor. However, the function result is a {type(fval)}.'
)
return fval, gval[0]
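# Minimal usage sketch for vjp() above (kept as a comment; assumes a Paddle
# build that provides paddle.autograd.functional.vjp with this signature):
#   x = paddle.to_tensor([1.0, 2.0, 3.0])
#   fval, gval = vjp(lambda t: paddle.sum(t * t), x)
#   # fval -> 14.0, gval -> [2.0, 4.0, 6.0]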
def vnorm_p(x, p=2):
r"""p vector norm."""
return paddle.norm(x, p=p, axis=-1)
def vnorm_inf(x):
r"""Infinum vector norm."""
return paddle.norm(x, p=np.inf, axis=-1)
def matnorm(x):
r"""Matrix norm."""
return paddle.norm(x, 'fro')
def any_active(state):
return paddle.any(state == 0)
def all_active_with_predicates(state, *predicates):
r"""Tests whether all active states also satisfies the predicates.
Args:
state (Tensor): the search state of dtype int. For each element, 0
represents active state.
predicates (List[Tensor]): a list of boolean typed tensors of the
same shape with `state`.
Returns:
A scalar boolean tensor. True if the predicates are true for every
active state. Otherwise False.
"""
active_preds = active = active_state(state)
for p in predicates:
active_preds = paddle.logical_and(active_preds, p)
return paddle.all(active == active_preds)
def any_active_with_predicates(state, *predicates):
r"""Tests whether there's any active state also satisfies all the
predicates.
Args:
state (Tensor): the search state of dtype int. For each element, 0
represents active state.
predicates (List[Tensor]): a list of boolean typed tensors of the
same shape with `state`.
Returns:
A scalar boolean tensor. True if any element in `state` is active and
the corresponding predicate values are all True. Otherwise False.
"""
active_preds = active_state(state)
for p in predicates:
active_preds = paddle.logical_and(active_preds, p)
return paddle.any(active_preds)
def active_state(state):
return state == 0
def converged_state(state):
return state == 1
def failed_state(state):
return state == 2
def make_const(tensor_like, value, dtype=None):
r"""Makes a tensor filled with specified constant value.
Args:
tensor_like (Tensor): uses this tensor's shape and dtype to build
the output tensor.
value (float|boolean|int): fills the output tensor with this value.
        dtype (Optional): the output tensor's dtype. Default is
None, in which case the output uses `tensor_like`'s dtype.
Returns:
The generated tensor with constant value `value` with the desired
dtype.
"""
if dtype is None:
dtype = tensor_like.dtype
return paddle.to_tensor(value, dtype).broadcast_to(tensor_like.shape)
def make_state(tensor_like, value='active'):
r"""Makes BFGS state tensor. Default is all zeros.
    Args:
tensor_like (Tensor): provides the shape of the result tensor.
value (str, optional): indicates the default value of the result
tensor. If `value` is 'active' then the result tensor is all
zeros. If `value` is 'converged' then the result tensor is all
ones. If `value` is 'failed' then the result tensor is all twos.
Default value is 'active'.
Returns:
        Tensor with the same shape as `tensor_like`, of dtype `int`.
"""
    # (FIXME) int8 would be a better choice, but it's not consistently
    # supported across Paddle APIs, so int32 is used instead.
    if value == 'active':
        state = paddle.zeros_like(tensor_like, dtype='int32')
    elif value == 'converged':
        state = paddle.ones_like(tensor_like, dtype='int32')
    else:
        assert value == 'failed'
        state = paddle.ones_like(tensor_like, dtype='int32') + 1
return state
def update_state(input_state, predicate, new_state):
r"""Updates the state on the locations where the old value is 0 and
corresponding predicate is True.
Args:
input_state (Tensor): the original state tensor.
predicate (Tensor): a tensor with the same shape of `input_state`, of
boolean type, indicating which locations should be updated.
new_state ('failed' | 'converged'): specifies the new state, either
'converged' or 'failed'.
Returns:
Tensor updated on the specified locations.
"""
assert new_state in ('converged', 'failed')
    if new_state == 'converged':
increments = paddle.to_tensor(predicate, dtype='int32')
else:
increments = paddle.to_tensor(predicate, dtype='int32') * 2
output_state = paddle.where(input_state == 0, increments, input_state)
return output_state
def as_float_tensor(input, dtype=None):
r"""Generates a float or double typed tensor from `input`. The data
type of `input` is either float or double.
Args:
input(Scalar | List | Tensor): a scalar or a list of floats, or
a tensor with dtype float or double.
dtype('float' | 'float32' | 'float64' | 'double', Optional): the data
type of the result tensor. The default value is None.
Returns:
A tensor with the required dtype.
"""
assert isinstance(input, (int, float, list, paddle.Tensor)), (
        f'Input `{input}` should be int, float, list or paddle.Tensor but found '
f'{type(input)}.'
)
if dtype in ('float', 'float32', paddle.float32):
dtype = 'float32'
elif dtype in ['float64', 'double', paddle.float64]:
dtype = 'float64'
else:
assert dtype is None
try:
output = paddle.to_tensor(input, dtype=dtype)
if output.dtype not in (paddle.float32, paddle.float64):
raise TypeError
except TypeError:
raise TypeError(f'The data type of {input} is {type(input)}, which is not supported.')
return output
class StopCounterException(Exception):
r"""raises this Exception on the event that increments a stopped
StopCounter.
"""
pass
class StopCounter(object):
r"""Defines a counter with a predefined end count.
"""
def __init__(self, end):
self.count = 0
assert isinstance(end, int) and end > 0
self.end = end
def increment(self):
r"""Increments the counter."""
if self.count < self.end:
self.count += 1
else:
raise StopCounterException() | 0.927306 | 0.637496 |
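# An end-to-end sketch of the state helpers above. Values are illustrative
# and assume a working Paddle install; none of this is part of the module.
import paddle
x = paddle.zeros([3])
state = make_state(x)                 # [0, 0, 0]: three active searches
# Mark the first search converged and the third failed; the middle one
# stays active because neither predicate selects it.
state = update_state(state, paddle.to_tensor([True, False, False]), 'converged')
state = update_state(state, paddle.to_tensor([False, False, True]), 'failed')
# state is now [1, 0, 2]
print(any_active(state))              # True: the middle search is still active
# StopCounter guards an iteration budget: the (end + 1)-th call raises.
counter = StopCounter(end=2)
counter.increment()
counter.increment()
# counter.increment()                 # would raise StopCounterException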
import base64
import json
import os
from django.conf import settings
from waiting import wait, TimeoutExpired
from sandbox.apps.devices.core.emulators import emulators_controller
from sandbox.libs.logs.core.ws import ws_log_message
from sandbox.apps.provision.core.common import provision_generated_predicate
from sandbox.libs.context.models import SandboxActions
from sandbox.apps.devices.models import Device
from sandbox.libs.cmd_process import CmdProcessInteractive
from sandbox.libs.decorators import threaded_action
from sandbox.libs.logger import logger_by_name
from sandbox.libs.utils import find_files
REQUESTS_FILE = os.path.join(settings.SANDBOX_TEMP_DIR, 'card-requests.txt')
DEVICE_INFO_FILE = os.path.join(settings.SANDBOX_TEMP_DIR, 'device-info.txt')
@threaded_action(action=SandboxActions.PERFORM_PROVISION,
forbidden_on=[SandboxActions.GENERATE_PROVISION,
SandboxActions.PERFORM_PROVISION,
SandboxActions.REGISTER_DEVICES,
SandboxActions.PUBLISH_NEW_TRUST_LIST],
predicate=provision_generated_predicate)
def perform_provision():
"""
    1) Start initializer emulators, if needed
    2) Perform provisioning by launching the Virgil Device Initializer
"""
    # Find devices that need to be provisioned
to_be_provisioned = [d for d in Device.objects.all() if d.is_ready_for_provision]
# Prepare list of mac addresses of emulators
macs = [d.mac for d in to_be_provisioned if d.is_emulator]
try:
        # Start initializer emulators
emulators_controller.start_initializers(macs)
# Start Device Initializer
initializer = _get_initializer_process()
initializer.run_in_thread()
# Wait for Initializer return code
timeout = len(to_be_provisioned) * 10 # give up to 10 seconds to each initializer
try:
wait(lambda: initializer.return_code == 0,
timeout_seconds=timeout,
waiting_for='Initializer successfully finished')
except TimeoutExpired:
            raise Exception(f'Failed to initialize devices within {timeout} seconds\n'
f'Virgil Device Initializer return code: {initializer.return_code}')
finally:
emulators_controller.stop_initializers(macs)
def _get_initializer_process():
pp_dir = settings.SANDBOX_PROVISION_PACK_DIR
# Prepare params for initializer launch
auth_pub_key_1, auth_pub_key_2 = find_files(pp_dir, 'auth_.*pub', regex=True)
rec_pub_key_1, rec_pub_key_2 = find_files(pp_dir, 'recovery_.*pub', regex=True)
tl_pub_key_1, tl_pub_key_2 = find_files(pp_dir, 'tl_.*pub', regex=True)
fw_pub_key_1, fw_pub_key_2 = find_files(pp_dir, 'firmware_.*pub', regex=True)
trust_list, *_ = find_files(pp_dir, 'TrustList_')
factory_key, *_ = find_files(pp_dir, 'factory_.*key', regex=True)
# Prepare process
cmd = (
f'virgil-device-initializer '
f'--output "{REQUESTS_FILE}" '
f'--device_info_output "{DEVICE_INFO_FILE}" '
f'--auth_pub_key_1 "{auth_pub_key_1}" '
f'--auth_pub_key_2 "{auth_pub_key_2}" '
f'--rec_pub_key_1 "{rec_pub_key_1}" '
f'--rec_pub_key_2 "{rec_pub_key_2}" '
f'--tl_pub_key_1 "{tl_pub_key_1}" '
f'--tl_pub_key_2 "{tl_pub_key_2}" '
f'--fw_pub_key_1 "{fw_pub_key_1}" '
f'--fw_pub_key_2 "{fw_pub_key_2}" '
f'--trust_list "{trust_list}" '
f'--factory_key "{factory_key}"'
)
logger = logger_by_name('virgil-device-initializer')
process = CmdProcessInteractive(cmd, logger=logger)
_register_callbacks(process)
return process
def _register_callbacks(process):
def on_initialized(line):
card_request_b64 = line.split()[-1].strip()
# Get mac of initialized thing
card_request = base64.b64decode(card_request_b64).decode()
signatures = json.loads(card_request).get('signatures')
self_sign, *_ = [s for s in signatures if s.get('signer') == 'self']
snapshot_b64 = self_sign.get('snapshot')
snapshot = base64.b64decode(snapshot_b64).decode()
mac = json.loads(snapshot).get('mac').lower()
if not mac:
raise Exception(f'Failed to extract mac from card request: {card_request_b64}')
# Update device in model
device = Device.objects.get(mac=mac)
device.on_initialized(card_request_b64)
# Log
ws_log_message(f'Successfully initialized: {device.device_type} {mac}')
    # Assume that when this event appears in the log, the device has been initialized
process.register_callback_on_event(on_initialized, 'Card request:') | iot-sandbox/sandbox/sandbox/apps/devices/core/provision.py | import base64
import json
import os
from django.conf import settings
from waiting import wait, TimeoutExpired
from sandbox.apps.devices.core.emulators import emulators_controller
from sandbox.libs.logs.core.ws import ws_log_message
from sandbox.apps.provision.core.common import provision_generated_predicate
from sandbox.libs.context.models import SandboxActions
from sandbox.apps.devices.models import Device
from sandbox.libs.cmd_process import CmdProcessInteractive
from sandbox.libs.decorators import threaded_action
from sandbox.libs.logger import logger_by_name
from sandbox.libs.utils import find_files
REQUESTS_FILE = os.path.join(settings.SANDBOX_TEMP_DIR, 'card-requests.txt')
DEVICE_INFO_FILE = os.path.join(settings.SANDBOX_TEMP_DIR, 'device-info.txt')
@threaded_action(action=SandboxActions.PERFORM_PROVISION,
forbidden_on=[SandboxActions.GENERATE_PROVISION,
SandboxActions.PERFORM_PROVISION,
SandboxActions.REGISTER_DEVICES,
SandboxActions.PUBLISH_NEW_TRUST_LIST],
predicate=provision_generated_predicate)
def perform_provision():
"""
    1) Start initializer emulators, if needed
    2) Perform provisioning by launching the Virgil Device Initializer
"""
    # Find devices that need to be provisioned
to_be_provisioned = [d for d in Device.objects.all() if d.is_ready_for_provision]
# Prepare list of mac addresses of emulators
macs = [d.mac for d in to_be_provisioned if d.is_emulator]
try:
        # Start initializer emulators
emulators_controller.start_initializers(macs)
# Start Device Initializer
initializer = _get_initializer_process()
initializer.run_in_thread()
# Wait for Initializer return code
timeout = len(to_be_provisioned) * 10 # give up to 10 seconds to each initializer
try:
wait(lambda: initializer.return_code == 0,
timeout_seconds=timeout,
waiting_for='Initializer successfully finished')
except TimeoutExpired:
            raise Exception(f'Failed to initialize devices within {timeout} seconds\n'
f'Virgil Device Initializer return code: {initializer.return_code}')
finally:
emulators_controller.stop_initializers(macs)
def _get_initializer_process():
pp_dir = settings.SANDBOX_PROVISION_PACK_DIR
# Prepare params for initializer launch
auth_pub_key_1, auth_pub_key_2 = find_files(pp_dir, 'auth_.*pub', regex=True)
rec_pub_key_1, rec_pub_key_2 = find_files(pp_dir, 'recovery_.*pub', regex=True)
tl_pub_key_1, tl_pub_key_2 = find_files(pp_dir, 'tl_.*pub', regex=True)
fw_pub_key_1, fw_pub_key_2 = find_files(pp_dir, 'firmware_.*pub', regex=True)
trust_list, *_ = find_files(pp_dir, 'TrustList_')
factory_key, *_ = find_files(pp_dir, 'factory_.*key', regex=True)
# Prepare process
cmd = (
f'virgil-device-initializer '
f'--output "{REQUESTS_FILE}" '
f'--device_info_output "{DEVICE_INFO_FILE}" '
f'--auth_pub_key_1 "{auth_pub_key_1}" '
f'--auth_pub_key_2 "{auth_pub_key_2}" '
f'--rec_pub_key_1 "{rec_pub_key_1}" '
f'--rec_pub_key_2 "{rec_pub_key_2}" '
f'--tl_pub_key_1 "{tl_pub_key_1}" '
f'--tl_pub_key_2 "{tl_pub_key_2}" '
f'--fw_pub_key_1 "{fw_pub_key_1}" '
f'--fw_pub_key_2 "{fw_pub_key_2}" '
f'--trust_list "{trust_list}" '
f'--factory_key "{factory_key}"'
)
logger = logger_by_name('virgil-device-initializer')
process = CmdProcessInteractive(cmd, logger=logger)
_register_callbacks(process)
return process
def _register_callbacks(process):
def on_initialized(line):
card_request_b64 = line.split()[-1].strip()
# Get mac of initialized thing
card_request = base64.b64decode(card_request_b64).decode()
signatures = json.loads(card_request).get('signatures')
self_sign, *_ = [s for s in signatures if s.get('signer') == 'self']
snapshot_b64 = self_sign.get('snapshot')
snapshot = base64.b64decode(snapshot_b64).decode()
mac = json.loads(snapshot).get('mac').lower()
if not mac:
raise Exception(f'Failed to extract mac from card request: {card_request_b64}')
# Update device in model
device = Device.objects.get(mac=mac)
device.on_initialized(card_request_b64)
# Log
ws_log_message(f'Successfully initialized: {device.device_type} {mac}')
    # Assume that when this event appears in the log, the device has been initialized
process.register_callback_on_event(on_initialized, 'Card request:') | 0.427516 | 0.097691 |
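# A self-contained sketch of the card-request decoding done in
# on_initialized() above. The payload is fabricated to match the shape the
# callback expects; real requests come from virgil-device-initializer.
import base64, json
snapshot = base64.b64encode(json.dumps({'mac': 'AA:BB:CC:DD:EE:FF'}).encode()).decode()
card_request = {'signatures': [{'signer': 'self', 'snapshot': snapshot}]}
card_request_b64 = base64.b64encode(json.dumps(card_request).encode()).decode()
# Same extraction steps as the callback, minus the Django model update:
signatures = json.loads(base64.b64decode(card_request_b64).decode())['signatures']
self_sign, *_ = [s for s in signatures if s.get('signer') == 'self']
mac = json.loads(base64.b64decode(self_sign['snapshot']).decode())['mac'].lower()
print(mac)  # aa:bb:cc:dd:ee:ff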
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from bluebottle.utils.model_dispatcher import get_model_mapping
MODEL_MAP = get_model_mapping()
class Migration(DataMigration):
depends_on = (
('bluebottle.payments_logger', '0001_initial'),
('bluebottle.payments_docdata', '0002_auto__add_field_docdatapayment_customer_id__add_field_docdatapayment_e'),
)
def forwards(self, orm):
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
# Create a lookup for new docdata payments
ddps = orm['payments_docdata.DocdataPayment'].objects.all().values_list('payment_cluster_id', 'id')
dd_payments = {}
for ddp in ddps:
dd_payments[ddp[0]] = ddp[1]
count = 0
total = orm['cowry_docdata.DocDataPaymentLogEntry'].objects.count()
for i, log_entry_model in enumerate(orm['cowry_docdata.DocDataPaymentLogEntry'].objects.iterator()):
if not i % 50:
print "Processing DocdataPaymentLogEntry {0} of {1}".format(i, total)
# Fetch DocDataPaymentOrder
old_docdata_payment_order = log_entry_model.docdata_payment_order
# Fetch corresponding DocdataPayment
if old_docdata_payment_order.merchant_order_reference in dd_payments:
new_docdata_payment_id = dd_payments[old_docdata_payment_order.merchant_order_reference]
else:
                count += 1
msg = "No new DocdataPayment object found for the old DocdataPaymentOrder object. DocdataPaymentOrder ID: {0} DocDataPaymentLogEntry ID: {1}".format(old_docdata_payment_order.id, log_entry_model.id)
print msg
continue
# Create new PaymentLogEntry using the old DocDataPaymentLogEntry data
payment_log_entry = orm['payments_logger.PaymentLogEntry'].objects.create(
message=log_entry_model.message,
level=log_entry_model.level,
timestamp=log_entry_model.timestamp,
payment_id=new_docdata_payment_id
)
payment_log_entry.save()
if not i % 50:
print "PaymentLogEntry {0} created".format(i)
print "PaymentLogEntries without DocdataPayment: {0}".format(count)
def backwards(self, orm):
orm['payments_logger.PaymentLogEntry'].objects.all().delete()
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'bb_projects.projectphase': {
'Meta': {'ordering': "['sequence']", 'object_name': 'ProjectPhase'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'editable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'owner_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sequence': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'viewable': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'bb_projects.projecttheme': {
'Meta': {'ordering': "['name']", 'object_name': 'ProjectTheme'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'cowry.payment': {
'Meta': {'object_name': 'Payment'},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '3'}),
'fee': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payments'", 'to': u"orm['fund.Order']"}),
'payment_method_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'payment_submethod_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cowry.payment_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '15', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'cowry_docdata.docdatapayment': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'DocDataPayment'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'docdata_payment_order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'docdata_payments'", 'to': u"orm['cowry_docdata.DocDataPaymentOrder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'payment_method': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cowry_docdata.docdatapayment_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'NEW'", 'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'cowry_docdata.docdatapaymentlogentry': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'DocDataPaymentLogEntry'},
'docdata_payment_order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'log_entries'", 'to': u"orm['cowry_docdata.DocDataPaymentOrder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'cowry_docdata.docdatapaymentorder': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'DocDataPaymentOrder', '_ormbases': [u'cowry.Payment']},
'address': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2'}),
'customer_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '254'}),
'first_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '2'}),
'last_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'merchant_order_reference': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'payment_order_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
u'payment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['cowry.Payment']", 'unique': 'True', 'primary_key': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20'})
},
u'cowry_docdata.docdatawebdirectdirectdebit': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'DocDataWebDirectDirectDebit', '_ormbases': [u'cowry_docdata.DocDataPayment']},
'account_city': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'account_name': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'bic': ('django_iban.fields.SWIFTBICField', [], {'max_length': '11'}),
u'docdatapayment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['cowry_docdata.DocDataPayment']", 'unique': 'True', 'primary_key': 'True'}),
'iban': ('django_iban.fields.IBANField', [], {'max_length': '34'})
},
u'fund.donation': {
'Meta': {'object_name': MODEL_MAP['donation']['class']},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': '3'}),
'donation_type': ('django.db.models.fields.CharField', [], {'default': "'one_off'", 'max_length': '20', 'db_index': 'True'}),
'fundraiser': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_donations'", 'null': 'True', 'to': "orm['{0}']".format(MODEL_MAP['fundraiser']['model'])}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'donations'", 'null': 'True', 'to': u"orm['fund.Order']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'old_donations'", 'to': "orm['{0}']".format(MODEL_MAP['project']['model'])}),
'ready': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['user']['model']), 'null': 'True', 'blank': 'True'}),
'voucher': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vouchers.Voucher']", 'null': 'True', 'blank': 'True'})
},
u'fund.order': {
'Meta': {'ordering': "('-updated',)", 'object_name': MODEL_MAP['order']['class']},
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30', 'db_index': 'True'}),
'recurring': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'current'", 'max_length': '20', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_orders'", 'null': 'True', 'to': "orm['{0}']".format(MODEL_MAP['user']['model'])})
},
u'fund.recurringdirectdebitpayment': {
'Meta': {'object_name': 'RecurringDirectDebitPayment'},
'account': ('apps.fund.fields.DutchBankAccountField', [], {'max_length': '10'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'amount': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'bic': ('django_iban.fields.SWIFTBICField', [], {'default': "''", 'max_length': '11', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': '3'}),
'iban': ('django_iban.fields.IBANField', [], {'default': "''", 'max_length': '34', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manually_process': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['{0}']".format(MODEL_MAP['user']['model']), 'unique': 'True'})
},
MODEL_MAP['fundraiser']['model_lower']: {
'Meta': {'object_name': MODEL_MAP['fundraiser']['class']},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': "'10'"}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['user']['model'])}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['project']['model'])}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '100', 'blank': 'True'})
},
u'geo.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'alpha2_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'alpha3_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'oda_recipient': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subregion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.SubRegion']"})
},
u'geo.region': {
'Meta': {'ordering': "['name']", 'object_name': 'Region'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'geo.subregion': {
'Meta': {'ordering': "['name']", 'object_name': 'SubRegion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Region']"})
},
MODEL_MAP['user']['model_lower']: {
'Meta': {'object_name': MODEL_MAP['user']['class']},
'about': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'}),
'available_time': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'birthdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'disable_token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'newsletter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'picture': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'share_money': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'share_time_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'skypename': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'user_type': ('django.db.models.fields.CharField', [], {'default': "'person'", 'max_length': '25'}),
'username': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'why': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'})
},
MODEL_MAP['order']['model_lower']: {
'Meta': {'object_name': MODEL_MAP['order']['class']},
'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'confirmed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_type': ('django.db.models.fields.CharField', [], {'default': "'single'", 'max_length': "'100'", 'null': 'True', 'blank': 'True'}),
'status': ('django_fsm.db.fields.fsmfield.FSMField', [], {'default': "'created'", 'max_length': '50'}),
'total': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '16', 'decimal_places': '2'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['user']['model']), 'null': 'True', 'blank': 'True'})
},
MODEL_MAP['organization']['model_lower']: {
'Meta': {'ordering': "['name']", 'object_name': MODEL_MAP['organization']['class']},
'account_bank_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_bank_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_bank_country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'account_bank_country'", 'null': 'True', 'to': u"orm['geo.Country']"}),
'account_bank_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_bank_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'account_bic': ('django_iban.fields.SWIFTBICField', [], {'max_length': '11', 'blank': 'True'}),
'account_holder_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_holder_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_holder_country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'account_holder_country'", 'null': 'True', 'to': u"orm['geo.Country']"}),
'account_holder_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_holder_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'account_iban': ('django_iban.fields.IBANField', [], {'max_length': '34', 'blank': 'True'}),
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'address_line1': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'address_line2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'country'", 'null': 'True', 'to': u"orm['geo.Country']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'partner_organizations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'registration': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'skype': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'payments.orderpayment': {
'Meta': {'object_name': 'OrderPayment'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '2'}),
'authorization_action': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['payments.OrderPaymentAction']", 'unique': 'True', 'null': 'True'}),
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'integration_data': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'max_length': '5000', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'order_payments'", 'to': "orm['{0}']".format(MODEL_MAP['order']['model'])}),
'payment_method': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'status': ('django_fsm.db.fields.fsmfield.FSMField', [], {'default': "'created'", 'max_length': '50'}),
'transaction_fee': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['user']['model']), 'null': 'True', 'blank': 'True'})
},
u'payments.orderpaymentaction': {
'Meta': {'object_name': 'OrderPaymentAction'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'payload': ('django.db.models.fields.CharField', [], {'max_length': '5000', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'})
},
u'payments.payment': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'Payment'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_payment': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['payments.OrderPayment']", 'unique': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_payments.payment_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'status': ('django_fsm.db.fields.fsmfield.FSMField', [], {'default': "'started'", 'max_length': '50'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'payments.transaction': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'Transaction'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['payments.Payment']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_payments.transaction_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'payments_docdata.docdatadirectdebittransaction': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'DocDataDirectDebitTransaction', '_ormbases': [u'payments.Transaction']},
'account_city': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'account_name': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'bic': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'iban': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
u'transaction_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['payments.Transaction']", 'unique': 'True', 'primary_key': 'True'})
},
u'payments_docdata.docdatapayment': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'DocdataPayment', '_ormbases': [u'payments.Payment']},
'address': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'customer_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'default_pm': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '254'}),
'first_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'ideal_issuer_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'ip_address': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '5', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'merchant_order_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'payment_cluster_id': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '200'}),
'payment_cluster_key': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '200'}),
u'payment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['payments.Payment']", 'unique': 'True', 'primary_key': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20'}),
'total_acquirer_approved': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '15', 'decimal_places': '2'}),
'total_acquirer_pending': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '15', 'decimal_places': '2'}),
'total_captured': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '15', 'decimal_places': '2'}),
'total_charged_back': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '15', 'decimal_places': '2'}),
'total_gross_amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '15', 'decimal_places': '2'}),
'total_refunded': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '15', 'decimal_places': '2'}),
'total_registered': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '15', 'decimal_places': '2'}),
'total_shopper_pending': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '15', 'decimal_places': '2'})
},
u'payments_docdata.docdatatransaction': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'DocdataTransaction', '_ormbases': [u'payments.Transaction']},
'authorization_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'authorization_currency': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'}),
'authorization_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': 'True'}),
'capture_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'capture_currency': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'}),
'capture_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': 'True'}),
'docdata_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'payment_method': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'NEW'", 'max_length': '30'}),
u'transaction_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['payments.Transaction']", 'unique': 'True', 'primary_key': 'True'})
},
u'payments_logger.paymentlogentry': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'PaymentLogEntry'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payments'", 'to': u"orm['payments.Payment']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'projects.partnerorganization': {
'Meta': {'object_name': 'PartnerOrganization'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
MODEL_MAP['project']['model_lower']: {
'Meta': {'ordering': "['title']", 'object_name': MODEL_MAP['project']['class']},
'allow_overfunding': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'amount_asked': ('bluebottle.bb_projects.fields.MoneyField', [], {'default': '0', 'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'amount_donated': ('bluebottle.bb_projects.fields.MoneyField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'amount_needed': ('bluebottle.bb_projects.fields.MoneyField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'campaign_ended': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'campaign_funded': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'campaign_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Country']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'date_submitted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'effects': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'favorite': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'for_who': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'future': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'blank': 'True'}),
'is_campaign': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Language']", 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '21', 'decimal_places': '18', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '21', 'decimal_places': '18', 'blank': 'True'}),
'mchanga_account': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'organization'", 'null': 'True', 'to': "orm['{0}']".format(MODEL_MAP['organization']['model'])}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': "orm['{0}']".format(MODEL_MAP['user']['model'])}),
'partner_organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.PartnerOrganization']", 'null': 'True', 'blank': 'True'}),
'pitch': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'popularity': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reach': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'skip_monthly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bb_projects.ProjectPhase']"}),
'story': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bb_projects.ProjectTheme']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
},
u'utils.language': {
'Meta': {'ordering': "['language_name']", 'object_name': 'Language'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'native_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'vouchers.voucher': {
'Meta': {'object_name': 'Voucher'},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '2'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '500', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vouchers'", 'null': 'True', 'to': u"orm['fund.Order']"}),
'receiver': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'receiver'", 'null': 'True', 'to': "orm['{0}']".format(MODEL_MAP['user']['model'])}),
'receiver_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'receiver_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sender'", 'null': 'True', 'to': "orm['{0}']".format(MODEL_MAP['user']['model'])}),
'sender_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'sender_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
}
}
complete_apps = ['payments', 'payments_docdata', 'payments_logger', 'cowry_docdata', 'fund']
symmetrical = True | apps/fund/migrations/0005_migrate_paymentlogs.py | 0.432543 | 0.116387 |
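# South freezes each model field in the `models` dict above as a triple of
# (dotted field-class path, positional args, kwargs whose values are Python
# source strings). A minimal decoding sketch; reconstruct_field is a
# hypothetical helper for illustration, not part of South itself:
from importlib import import_module

def reconstruct_field(frozen):
    path, args, kwargs = frozen
    module_path, class_name = path.rsplit('.', 1)
    field_cls = getattr(import_module(module_path), class_name)
    # Values are stored as source text ('15', 'True', "'new'"), so they are
    # eval'd back; values naming objects (e.g. 'datetime.datetime.now')
    # need those names importable in the eval scope.
    return field_cls(*[eval(a) for a in args],
                     **{k: eval(v) for k, v in kwargs.items()})

# Example: the PaymentLogEntry 'level' entry above decodes to
# django.db.models.CharField(max_length=15).
level_field = reconstruct_field(
    ('django.db.models.fields.CharField', [], {'max_length': '15'})
)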
__version__ = '0.1'
__versionTime__ = '2013-03-29'
__author__ = '<NAME> <<EMAIL>>'
__doc__ = '''
pybass_sfx.py - a ctypes-based Python module for
BASS_SFX, an extension allowing the use of Sonique, Winamp,
Windows Media Player, and BassBox visual plugins with BASS.
'''
import ctypes
try:
import bass
import pybass
except ImportError:
from . import bass
from . import pybass
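# bass.load() (from the companion bass helper module) is assumed here to
# return the loaded BASS_SFX shared library plus the platform-appropriate
# ctypes function-pointer factory (WINFUNCTYPE on Windows, CFUNCTYPE
# elsewhere). The bass_vst_module name below looks inherited from the
# pybass_vst sibling module; it actually holds the BASS_SFX library handle.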
bass_vst_module, func_type = bass.load(__file__)
# ~ from ctypes.wintypes import HINSTANCE
# ~ from ctypes.wintypes import HWND
# ~ from ctypes.wintypes import LPCWSTR
# ~ from ctypes.wintypes import HDC
HSTREAM = pybass.HSTREAM
BASS_FILEPROCS = pybass.BASS_FILEPROCS
HSFX = ctypes.c_long
# visualization plugin types
BASS_SFX_SONIQUE = 0
BASS_SFX_WINAMP = 1
BASS_SFX_WMP = 2
BASS_SFX_BBP = 3
# PluginCreate Flags
BASS_SFX_SONIQUE_OPENGL = 1 # render sonique plugins using OpenGL
BASS_SFX_SONIQUE_OPENGL_DOUBLEBUFFER = 2 # use OpenGL double buffering
# Error codes returned by BASS_SFX_ErrorGetCode
BASS_SFX_OK = 0 # all is OK
BASS_SFX_ERROR_MEM = 1 # memory error
BASS_SFX_ERROR_FILEOPEN = 2 # can't open the file
BASS_SFX_ERROR_HANDLE = 3 # invalid handle
BASS_SFX_ERROR_ALREADY = 4 # already initialized
BASS_SFX_ERROR_FORMAT = 5 # unsupported plugin format
BASS_SFX_ERROR_INIT = 6 # BASS_SFX_Init has not been successfully called
BASS_SFX_ERROR_GUID = 7 # can't open WMP plugin using specified GUID
BASS_SFX_ERROR_UNKNOWN = -1 # some other mystery problem
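# A small convenience sketch (not part of the BASS_SFX API itself) that
# maps the error codes above to their names for readable diagnostics:
_SFX_ERROR_NAMES = {
    BASS_SFX_OK: 'BASS_SFX_OK',
    BASS_SFX_ERROR_MEM: 'BASS_SFX_ERROR_MEM',
    BASS_SFX_ERROR_FILEOPEN: 'BASS_SFX_ERROR_FILEOPEN',
    BASS_SFX_ERROR_HANDLE: 'BASS_SFX_ERROR_HANDLE',
    BASS_SFX_ERROR_ALREADY: 'BASS_SFX_ERROR_ALREADY',
    BASS_SFX_ERROR_FORMAT: 'BASS_SFX_ERROR_FORMAT',
    BASS_SFX_ERROR_INIT: 'BASS_SFX_ERROR_INIT',
    BASS_SFX_ERROR_GUID: 'BASS_SFX_ERROR_GUID',
    BASS_SFX_ERROR_UNKNOWN: 'BASS_SFX_ERROR_UNKNOWN',
}

def sfx_error_name(code):
    # Fall back to the raw number for codes added by newer releases.
    return _SFX_ERROR_NAMES.get(code, 'error code {0}'.format(code))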
# LPCWSTR = c_long_p = ctypes.POINTER(c_long)
# Windows Media Player Specific
class BASS_SFX_PLUGININFO(ctypes.Structure):
_fields_ = [
('name', ctypes.c_char_p),
('clsid', ctypes.c_char_p),
]
class BASS_SFX_PLUGININFOW(ctypes.Structure):
_fields_ = [
('name', ctypes.c_wchar_p),
('clsid', ctypes.c_wchar_p),
]
# BOOL SFXDEF(BASS_SFX_WMP_GetPlugin)(int index, BASS_SFX_PLUGININFO* info);
BASS_SFX_WMP_GetPlugin = func_type(
HSTREAM, ctypes.c_int, ctypes.POINTER(BASS_SFX_PLUGININFO)
)(('BASS_SFX_WMP_GetPlugin', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_WMP_GetPluginW)(int index, BASS_SFX_PLUGININFOW* info);
BASS_SFX_WMP_GetPluginW = func_type(
HSTREAM, ctypes.c_int, ctypes.POINTER(BASS_SFX_PLUGININFOW)
)(('BASS_SFX_WMP_GetPluginW', bass_vst_module))
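# A minimal enumeration sketch, assuming the usual BASS-style convention
# that the getter returns FALSE (0) once the index runs past the last
# installed Windows Media Player plugin:
def list_wmp_plugins():
    plugins = []
    index = 0
    info = BASS_SFX_PLUGININFOW()
    while BASS_SFX_WMP_GetPluginW(index, ctypes.byref(info)):
        plugins.append((info.name, info.clsid))
        index += 1
    return plugins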
# DWORD SFXDEF(BASS_SFX_GetVersion)();
BASS_SFX_GetVersion = func_type(
HSTREAM
)(('BASS_SFX_GetVersion', bass_vst_module))
# DWORD SFXDEF(BASS_SFX_ErrorGetCode)();
BASS_SFX_ErrorGetCode = func_type(
ctypes.c_long  # signed, so BASS_SFX_ERROR_UNKNOWN (-1) round-trips correctly
)(('BASS_SFX_ErrorGetCode', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_Init)(HINSTANCE hInstance, HWND hWnd);
BASS_SFX_Init = func_type(
HSTREAM, ctypes.c_void_p, ctypes.c_void_p
)(('BASS_SFX_Init', bass_vst_module))
# DWORD SFXDEF(BASS_SFX_PluginFlags)(HSFX handle, DWORD flags, DWORD mask);
BASS_SFX_PluginFlags = func_type(
HSTREAM, HSFX, ctypes.c_ulong, ctypes.c_ulong
)(('BASS_SFX_PluginFlags', bass_vst_module))
# HSFX SFXDEF(BASS_SFX_PluginCreate)(char* strPath, HWND hPluginWnd, int nWidth, int nHeight, DWORD flags);
BASS_SFX_PluginCreate = func_type(
HSFX, ctypes.c_char_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_ulong
)(('BASS_SFX_PluginCreate', bass_vst_module))
# HSFX SFXDEF(BASS_SFX_PluginCreateW)(LPCWSTR strPath, HWND hPluginWnd, int nWidth, int nHeight, DWORD flags);
BASS_SFX_PluginCreateW = func_type(
HSFX, ctypes.c_wchar_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_ulong
)(('BASS_SFX_PluginCreateW', bass_vst_module))
# int SFXDEF(BASS_SFX_PluginGetType)(HSFX handle);
BASS_SFX_PluginGetType = func_type(
HSTREAM, HSFX
)(('BASS_SFX_PluginGetType', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_PluginSetStream)(HSFX handle, HSTREAM hStream);
BASS_SFX_PluginSetStream = func_type(
HSTREAM, HSFX, HSTREAM
)(('BASS_SFX_PluginSetStream', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_PluginStart)(HSFX handle);
BASS_SFX_PluginStart = func_type(
HSTREAM, HSFX
)(('BASS_SFX_PluginStart', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_PluginStop)(HSFX handle);
BASS_SFX_PluginStop = func_type(
HSTREAM, HSFX
)(('BASS_SFX_PluginStop', bass_vst_module))
# char* SFXDEF(BASS_SFX_PluginGetName)(HSFX handle);
BASS_SFX_PluginGetName = func_type(
ctypes.c_char_p, HSFX  # returns char* per the prototype above
)(('BASS_SFX_PluginGetName', bass_vst_module))
# char* SFXDEF(BASS_SFX_PluginGetNameW)(HSFX handle);
BASS_SFX_PluginGetNameW = func_type(
ctypes.c_wchar_p, HSFX  # assumed wide-string return for the W variant
)(('BASS_SFX_PluginGetNameW', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_PluginConfig)(HSFX handle);
BASS_SFX_PluginConfig = func_type(
HSTREAM, HSFX
)(('BASS_SFX_PluginConfig', bass_vst_module))
# int SFXDEF(BASS_SFX_PluginModuleGetCount)(HSFX handle);
BASS_SFX_PluginModuleGetCount = func_type(
HSTREAM, HSFX
)(('BASS_SFX_PluginModuleGetCount', bass_vst_module))
# char* SFXDEF(BASS_SFX_PluginModuleGetName)(HSFX handle, int module);
BASS_SFX_PluginModuleGetName = func_type(
ctypes.c_char_p, HSFX, ctypes.c_int
)(('BASS_SFX_PluginModuleGetName', bass_vst_module))
# LPCWSTR SFXDEF(BASS_SFX_PluginModuleGetNameW)(HSFX handle, int module);
BASS_SFX_PluginModuleGetNameW = func_type(
ctypes.c_wchar_p, HSFX, ctypes.c_int
)(('BASS_SFX_PluginModuleGetNameW', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_PluginModuleSetActive)(HSFX handle, int module);
BASS_SFX_PluginModuleSetActive = func_type(
HSTREAM, HSFX, ctypes.c_int
)(('BASS_SFX_PluginModuleSetActive', bass_vst_module))
# int SFXDEF(BASS_SFX_PluginModuleGetActive)(HSFX handle);
BASS_SFX_PluginModuleGetActive = func_type(
HSTREAM, HSFX
)(('BASS_SFX_PluginModuleGetActive', bass_vst_module))
# HBITMAP SFXDEF(BASS_SFX_PluginRender)(HSFX handle, HSTREAM hStream, HDC hDC); //only sonique, bassbox, or WMP
BASS_SFX_PluginRender = func_type(
ctypes.c_void_p, HSFX, HSTREAM, ctypes.c_void_p  # returns HBITMAP
)(('BASS_SFX_PluginRender', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_PluginClicked)(HSFX handle, int x, int y);
BASS_SFX_PluginClicked = func_type(
HSTREAM, HSFX, ctypes.c_int, ctypes.c_int
)(('BASS_SFX_PluginClicked', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_PluginResize)(HSFX handle, int nWidth, int nHeight);
BASS_SFX_PluginResize = func_type(
HSTREAM, HSFX, ctypes.c_int, ctypes.c_int
)(('BASS_SFX_PluginResize', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_PluginResizeMove)(HSFX handle, int nLeft, int nTop, int nWidth, int nHeight);
BASS_SFX_PluginResizeMove = func_type(
HSTREAM, HSFX, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int
)(('BASS_SFX_PluginResizeMove', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_PluginFree)(HSFX handle);
BASS_SFX_PluginFree = func_type(
HSTREAM, HSFX
)(('BASS_SFX_PluginFree', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_Free)();
BASS_SFX_Free = func_type(
HSTREAM
)(('BASS_SFX_Free', bass_vst_module)) | modpybass/pybass_sfx.py |
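# A minimal end-to-end sketch tying the declarations above together; hwnd
# must be a valid win32 window handle, stream a playing BASS stream, and
# plugin_path an existing visual plugin on disk (all assumed, as is the
# sfx_error_name() helper defined earlier in this file):
def run_visualization(hwnd, stream, plugin_path=b'vis_example.svp'):
    if not BASS_SFX_Init(0, hwnd):
        raise RuntimeError('BASS_SFX_Init failed: ' +
                           sfx_error_name(BASS_SFX_ErrorGetCode()))
    plugin = BASS_SFX_PluginCreate(plugin_path, hwnd, 640, 480, 0)
    if not plugin:
        BASS_SFX_Free()
        raise RuntimeError('PluginCreate failed: ' +
                           sfx_error_name(BASS_SFX_ErrorGetCode()))
    BASS_SFX_PluginSetStream(plugin, stream)  # attach the audio source
    BASS_SFX_PluginStart(plugin)              # begin rendering into hwnd
    # Caller is responsible for BASS_SFX_PluginStop/PluginFree/Free on exit.
    return plugin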
HSTREAM, HSFX, ctypes.c_int
)(('BASS_SFX_PluginModuleGetNameW', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_PluginModuleSetActive)(HSFX handle, int module);
BASS_SFX_PluginModuleSetActive = func_type(
HSTREAM, HSFX, ctypes.c_int
)(('BASS_SFX_PluginModuleSetActive', bass_vst_module))
# int SFXDEF(BASS_SFX_PluginModuleGetActive)(HSFX handle);
BASS_SFX_PluginModuleGetActive = func_type(
HSTREAM, HSFX
)(('BASS_SFX_PluginModuleGetActive', bass_vst_module))
# HBITMAP SFXDEF(BASS_SFX_PluginRender)(HSFX handle, HSTREAM hStream, HDC hDC); //only sonique, bassbox, or WMP
BASS_SFX_PluginRender = func_type(
HSTREAM, HSFX, HSTREAM, ctypes.c_void_p
)(('BASS_SFX_PluginRender', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_PluginClicked)(HSFX handle, int x, int y);
BASS_SFX_PluginClicked = func_type(
HSTREAM, HSFX, ctypes.c_int, ctypes.c_int
)(('BASS_SFX_PluginClicked', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_PluginResize)(HSFX handle, int nWidth, int nHeight);
BASS_SFX_PluginResize = func_type(
HSTREAM, HSFX, ctypes.c_int, ctypes.c_int
)(('BASS_SFX_PluginResize', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_PluginResizeMove)(HSFX handle, int nLeft, int nTop, int nWidth, int nHeight);
BASS_SFX_PluginResizeMove = func_type(
HSTREAM, HSFX, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int
)(('BASS_SFX_PluginResizeMove', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_PluginFree)(HSFX handle);
BASS_SFX_PluginFree = func_type(
HSTREAM, HSFX
)(('BASS_SFX_PluginFree', bass_vst_module))
# BOOL SFXDEF(BASS_SFX_Free)();
BASS_SFX_Free = func_type(
HSTREAM
)(('BASS_SFX_Free', bass_vst_module)) | 0.335677 | 0.155655 |
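# A minimal usage sketch for the bindings above -- an assumption-laden
# illustration, not part of the original module. It presumes a Windows host
# where bass.load() succeeded, `hwnd` is a valid window handle, `stream` is an
# existing BASS stream handle, and the plugin path below is purely illustrative.
def demo_visualization(hwnd, stream):
    if not BASS_SFX_Init(0, hwnd):  # HINSTANCE 0 assumed acceptable here
        print('BASS_SFX_Init failed, error code', BASS_SFX_ErrorGetCode())
        return
    # Create a 640x480 visualization plugin inside the given window.
    handle = BASS_SFX_PluginCreate(b'vis_example.svp', hwnd, 640, 480, 0)
    if handle:
        BASS_SFX_PluginSetStream(handle, stream)
        BASS_SFX_PluginStart(handle)
        # ... run the window/message loop, then tear down:
        BASS_SFX_PluginStop(handle)
        BASS_SFX_PluginFree(handle)
    BASS_SFX_Free()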
import doubly_linked_list as dll
class LRUCache:
'''
    LRUCache represents a cache with the least-recently-used (LRU) eviction heuristic.
'''
    def __init__(self, capacity):
        '''
        Initialize the cache with a fixed capacity
        '''
        self.capacity = capacity
        # Per-instance state; class-level mutable defaults would be shared
        # across all LRUCache instances.
        self.doubly_list = dll.DoublyLinkedList()
        self.node_map = {}
def get(self, key):
'''
Get the value of the key if present else -1
'''
if key in self.node_map:
node = self.node_map[key]
# Delete the Node to move it to front of the list
self.doubly_list.delete(node)
# Add it back again
self.doubly_list.insert(node)
return node.pair[1]
return -1
def put(self, key, value):
'''
        Put the (key, value) pair in the cache; if the cache is full, evict
        the least recently used entry
'''
if key in self.node_map:
node = self.node_map[key]
# Delete the Node to move it to front of the list
self.doubly_list.delete(node)
# Update the value of the node
node.pair = (key, value)
# Add it back to the Doubly Linked List
self.doubly_list.insert(node)
self.node_map[key] = node
else:
# LRU has reached the capacity, so we need to delete the least recently used value
if len(self.node_map) == self.capacity:
deleted = self.doubly_list.delete_last()
self.node_map.pop(deleted.pair[0])
# Add the new value
node = dll.Node((key, value))
self.node_map[key] = node
self.doubly_list.insert(node)
def __repr__(self):
return repr(self.doubly_list)
if __name__ == '__main__':
cache = LRUCache(3)
cache.put(1, 2)
cache.put(2, 3)
cache.put(3, 5)
assert cache.get(1) == 2
cache.put(4, 7)
assert repr(cache) == '[4,7]->[1,2]->[3,5]->'
    print('Current Cache: {}'.format(repr(cache))) | algorithms/lru_cache.py | 0.596198 | 0.354992 |
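# The doubly_linked_list module is not shown in this file; the cache above
# relies only on a small interface inferred from its calls: Node objects with
# a `pair` attribute, insert (at the front), delete, delete_last, and a repr
# of the form [k,v]->... What follows is a minimal sketch consistent with that
# usage, not the actual module.
class Node:
    def __init__(self, pair):
        self.pair = pair
        self.prev = None
        self.next = None

class DoublyLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def insert(self, node):
        # New nodes go to the front (the most-recently-used end).
        node.prev = None
        node.next = self.head
        if self.head:
            self.head.prev = node
        self.head = node
        if self.tail is None:
            self.tail = node

    def delete(self, node):
        # Unlink a node from anywhere in the list.
        if node.prev:
            node.prev.next = node.next
        else:
            self.head = node.next
        if node.next:
            node.next.prev = node.prev
        else:
            self.tail = node.prev

    def delete_last(self):
        # Remove and return the least-recently-used node.
        node = self.tail
        if node is not None:
            self.delete(node)
        return node

    def __repr__(self):
        parts, current = [], self.head
        while current:
            parts.append('[{},{}]->'.format(current.pair[0], current.pair[1]))
            current = current.next
        return ''.join(parts)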
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch
import torch.utils.data as data
from RandAugment import RandAugment
def get_cifar_transforms(args):
mean = [0.49139968, 0.48215827, 0.44653124]
std = [0.24703233, 0.24348505, 0.26158768]
normalize = transforms.Normalize(mean, std)
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
valid_transform = transforms.Compose([
transforms.ToTensor(),
normalize
])
    if args.randaugment:
train_transform.transforms.insert(0, RandAugment(args.randaugment[0], args.randaugment[1]))
return train_transform, valid_transform
class CIFAR:
def __init__(self, args, dataset='cifar10'):
self.args = args
Data = datasets.CIFAR10 if dataset == 'cifar10' else datasets.CIFAR100
root = f'data/{dataset}/' if args.data_dir is None else args.data_dir
train_transform, valid_transform = get_cifar_transforms(args)
train = Data(root=root,
train=True,
transform=train_transform,
download=True)
valid = Data(root=root,
train=False,
transform=valid_transform,
download=True)
test = None
if args.distributed:
sampler_train = data.DistributedSampler(train, shuffle=True)
sampler_valid = data.DistributedSampler(valid, shuffle=False)
else:
sampler_train = data.RandomSampler(train)
sampler_valid = data.SequentialSampler(valid)
self.sampler_train = sampler_train
self.sampler_valid = sampler_valid
batch_sampler_train = torch.utils.data.BatchSampler(sampler_train, args.batch_size, drop_last=True)
self.train_loader = data.DataLoader(train,
batch_sampler=batch_sampler_train,
pin_memory=False,
num_workers=args.num_workers)
self.valid_loader = data.DataLoader(valid,
batch_size=args.batch_size,
sampler=sampler_valid,
drop_last=False,
pin_memory=False,
num_workers=args.num_workers)
        self.test_loader = None | src/dataset/cifar.py | 0.7641 | 0.662455 |
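# A sketch of how the loader above might be driven from a training script.
# The attribute names on `args` are exactly the ones the class reads; the
# values chosen here are illustrative assumptions, not project defaults.
from types import SimpleNamespace

args = SimpleNamespace(
    randaugment=(2, 9),  # (N, M) picked arbitrarily; a falsy value disables RandAugment
    data_dir=None,       # None falls back to data/cifar10/
    distributed=False,
    batch_size=128,
    num_workers=4,
)
data = CIFAR(args, dataset='cifar10')
for images, labels in data.train_loader:
    break  # one augmented, normalized batch of shape [128, 3, 32, 32]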
from typing import Union
import torch
from torch import distributions
class BoxUniform(distributions.Independent):
def __init__(
self,
low: Union[torch.Tensor, float],
high: Union[torch.Tensor, float],
reinterpreted_batch_ndims: int = 1,
):
"""Multidimensionqal uniform distribution defined on a box.
A `Uniform` distribution initialized with e.g. a parameter vector low or high of length 3 will result in a /batch/ dimension of length 3. A log_prob evaluation will then output three numbers, one for each of the independent Uniforms in the batch. Instead, a `BoxUniform` initialized in the same way has three /event/ dimensions, and returns a scalar log_prob corresponding to whether the evaluated point is in the box defined by low and high or outside.
Refer to torch.distributions.Uniform and torch.distributions.Independent for further documentation.
Args:
low (Tensor or float): lower range (inclusive).
high (Tensor or float): upper range (exclusive).
reinterpreted_batch_ndims (int): the number of batch dims to
reinterpret as event dims.
"""
super().__init__(
distributions.Uniform(low=low, high=high), reinterpreted_batch_ndims
)
class MG1Uniform(distributions.Uniform):
def log_prob(self, value):
return super().log_prob(self._to_noise(value))
def sample(self, sample_shape=torch.Size()):
return self._to_parameters(super().sample(sample_shape))
def _to_parameters(self, noise):
A_inv = torch.tensor([[1.0, 1, 0], [0, 1, 0], [0, 0, 1]])
return noise @ A_inv
def _to_noise(self, parameters):
A = torch.tensor([[1.0, -1, 0], [0, 1, 0], [0, 0, 1]])
return parameters @ A
class LotkaVolterraOscillating:
def __init__(self):
mean = torch.log(torch.tensor([0.01, 0.5, 1, 0.01]))
sigma = 0.5
covariance = sigma ** 2 * torch.eye(4)
self._gaussian = distributions.MultivariateNormal(
loc=mean, covariance_matrix=covariance
)
self._uniform = BoxUniform(low=-5 * torch.ones(4), high=2 * torch.ones(4))
self._log_normalizer = -torch.log(
torch.erf((2 - mean) / sigma) - torch.erf((-5 - mean) / sigma)
).sum()
def log_prob(self, value):
unnormalized_log_prob = self._gaussian.log_prob(value) + self._uniform.log_prob(
value
)
return self._log_normalizer + unnormalized_log_prob
def sample(self, sample_shape=torch.Size()):
num_remaining_samples = sample_shape[0]
samples = []
while num_remaining_samples > 0:
candidate_samples = self._gaussian.sample((num_remaining_samples,))
uniform_log_prob = self._uniform.log_prob(candidate_samples)
accepted_samples = candidate_samples[~torch.isinf(uniform_log_prob)]
samples.append(accepted_samples.detach())
num_accepted = (~torch.isinf(uniform_log_prob)).sum().item()
num_remaining_samples -= num_accepted
# Aggregate collected samples.
samples = torch.cat(samples)
# Make sure we have the right amount.
samples = samples[: sample_shape[0], ...]
assert samples.shape[0] == sample_shape[0]
        return samples | nflows/distributions/uniform.py | 0.974215 | 0.868827 |
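# A quick check of the batch-versus-event distinction described in the
# BoxUniform docstring above: a plain Uniform over a length-3 box returns one
# log-probability per dimension, while BoxUniform treats the three dimensions
# as a single event and returns a scalar.
low, high = torch.zeros(3), torch.ones(3)
x = torch.tensor([0.2, 0.5, 0.9])
print(distributions.Uniform(low, high).log_prob(x).shape)  # torch.Size([3])
print(BoxUniform(low, high).log_prob(x).shape)             # torch.Size([])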
import nltk.tokenize.punkt
from os import listdir
from os import path
import re
import io
import shutil
import os
import sys, getopt
# load the sentence tokenizer
ab_tokenizer = nltk.data.load("abkhaz_tokenizer.pickle")
ru_tokenizer = nltk.data.load("russian.pickle")
speech_tokenset = (
"иҳәеит", "рҳәеит", "сҳәеит", "шәҳәеит", "ҳҳәеит", "бҳәеит", "лҳәеит",
"иҳәоит", "рҳәоит", "сҳәоит", "шәҳәоит", "ҳҳәоит", "бҳәоит", "лҳәоит",
"иҳәон", "рҳәон", "сҳәон", "шәҳәон", "ҳҳәон", "бҳәон", "лҳәон",
"ҳәа", "лҳәаит","иҳәит","иӡбит","Дыхәнет","рҳәит", "дҵааит")
acronyms = [
"А.","Ҟ.","Ц.","Ҵ.","У.","К.","Қ.","Е.","Н.","Г.","Ӷ.","Ш.","З.","Ӡ.","Х.","Ҳ.",
"Ҿ.","Ф.","В.","П.","Ԥ.","Р.","Л.","Д.","Ж.","Ҽ.","Џ.","Ч.","Ҷ.","С.","М.","Т.",
"Ҭ.","Ҩ.","Ҟә.","Цә.","Ҵә.","Кә.","Қә.","Гә.","Ӷә.","Шә.","Ӡә.","Хә.","Ҳә.",
"Дә.","Жә.","Тә.","Ҭә.","Ҟь.","Кь.","Қь.","Гь.","Ӷь.","Хь.","Жь.","Џь.","Шь."
]
inputfile = ''
outputfile = 'output.txt'
language = ''
def main(argv):
global inputfile
global outputfile
global language
try:
opts, args = getopt.getopt(argv,"hi:o:l:",["ifile=","ofile=","lang="])
except getopt.GetoptError:
print('tokenize_ab_ru.py -i <inputfile> -o <outputfile> -l <languagecode>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('tokenize_ab_ru.py -i <inputfile> -o <outputfile> -l <languagecode>')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
elif opt in ("-l", "--lang"):
language = arg
def ends_with_acronym(sentence):
for acronym in acronyms:
if sentence.endswith(acronym):
return True
return False
def change_hyphen(text):
    '''
    Remove hyphenated line breaks (a hyphen immediately followed by a
    newline), rejoining words that were split across lines
    '''
    text = text.replace("-\n", "")
    return text
remHyphen = re.compile("^ – |^– |^- |^ - ")
upper_lower = re.compile('([ҞЦУКЕНГШӘЗХҾФЫВАПРОЛДЖҼЏЧСМИТЬБҨҴҚӶӠҲԤҶҬ]{2,})([^ҟцукенгшәзхҿфывапролджҽџчсмитьбҩҵқӷӡҳԥҷҭҞЦУКЕНГШӘЗХҾФЫВАПРОЛДЖҼЏЧСМИТЬБҨҴҚӶӠҲԤҶҬ]+)([ҞЦУКЕНГШӘЗХҾФЫВАПРОЛДЖҼЏЧСМИТЬБҨҴҚӶӠҲԤҶҬ]{2,})')
def correct_sentences(sentences):
for i,sentence in enumerate(sentences[:]):
if sentence.startswith("!»"):
#the newline should start after "!»"
#so we delete the start from the sentence
sentences[i] = sentence[2:]
sentence = sentence[2:]
# and put it to the sentence before
sentences[i-1] += "!»"
# the sentence should not end with an acronym
if i+1<len(sentences) and ends_with_acronym(sentence):
# let us combine those sentences
sentences[i] = sentence + sentences[i+1]
# and empty the following sentence in the list
sentences[i+1] = ""
if sentence.startswith(speech_tokenset):
# we combine the speech with the post word of the speech
sentences[i-1] += sentence
# and empty the post word
sentences[i] = ""
    for i, sentence in enumerate(sentences[:]):
        stripped = remHyphen.sub('', sentence)
        # Uppercase only the first character; str.capitalize() would also
        # lowercase the rest of the sentence.
        sentences[i] = stripped[:1].upper() + stripped[1:]
return sentences
if __name__ == "__main__":
main(sys.argv[1:])
infile = io.open(inputfile,'r', encoding="utf-8")
content = infile.read()
if language == "ab":
content = correct_sentences(ab_tokenizer.tokenize(content))
elif language == "ru":
content = correct_sentences(ru_tokenizer.tokenize(content))
outfile = io.open(outputfile,'w', encoding="utf-8")
for line in content:
        outfile.write(line+"\n") | utils/tokenize_ab_ru.py | 0.112918 | 0.303706 |
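# Example invocations of the script above (file names are illustrative); the
# pickled Punkt models loaded at import time must be present in the working
# directory:
#
#   python tokenize_ab_ru.py -i corpus_ab.txt -o corpus_ab.tok.txt -l ab
#   python tokenize_ab_ru.py -i corpus_ru.txt -o corpus_ru.tok.txt -l ru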
import itertools # set iterator for next word in the file
import pandas as pd
from sklearn.preprocessing import LabelEncoder # convert categorical variables into numerical variables
from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier
from sklearn import metrics # Import scikit-learn metrics module for accuracy calculation
import sys
# open files
sbd_train_file = sys.argv[1] if len(sys.argv) > 1 else "SBD.train"
sbd_test_file = sys.argv[2] if len(sys.argv) > 2 else "SBD.test"
data = open(sbd_train_file, "r")
test_data = open(sbd_test_file, "r")
# set iterator for next word in the file
list_cycle = itertools.cycle(data)
list_cycle_test = itertools.cycle(test_data)
data_features = []
test_data_features = []
test_out = []
# extract all features from train data and save it in "list of features " List
for words in data:
word = words.split()
if word[1][-1] == ".":
label = word[2]
left_word = word[1]
list_right_words = next(list_cycle)
right_words = list_right_words.split()
right_word = right_words[1]
data_features.append(
[left_word, right_word, str(left_word[0].isupper()), str(right_word[0].isupper()),
str(len(left_word) < 3),
str(len(right_word) < 3), str(len(left_word) > 5), str(len(right_word) > 5), label])
# extract all features from test data and save it in "test_data_features " List
for words in test_data:
word = words.split()
if word[1][-1] == ".":
label = word[2]
left_word = word[1]
list_right_words = next(list_cycle_test)
right_words = list_right_words.split()
right_word = right_words[1]
test_data_features.append(
[left_word, right_word, str(left_word[0].isupper()), str(right_word[0].isupper()),
str(len(left_word) < 3),
str(len(right_word) < 3), str(len(left_word) > 5), str(len(right_word) > 5), label])
test_out.append([word[0], word[1]])
# place "Data & Test_Data" in pandas DataFrame
col_names = ["L word", "R word", "L cap ?", "R cap ?", "L less than 3", "R less than 3", "L more than 5",
"R more than 5", "label"]
train_data = pd.DataFrame(data_features, columns=col_names)
test_data = pd.DataFrame(test_data_features, columns=col_names)
the_label = test_data.label
# Encoder function to Convert Pandas Categorical Data
def Encoder(df):
columns_to_encode = list(df.select_dtypes(include=['category', 'object']))
le = LabelEncoder()
for feature in columns_to_encode:
        try:
            df[feature] = le.fit_transform(df[feature])
        except Exception as e:
            print('Error encoding ' + feature + ': ' + str(e))
return df
train_data_encoded = Encoder(train_data) # Encode train data set
test_data_encoded = Encoder(test_data) # Encode test data set
all_feature_cols = ["L word", "R word", "L cap ?", "R cap ?", "L less than 3", "R less than 3", "L more than 5",
"R more than 5"]
core_feature_cols = ["L word", "R word", "L cap ?", "R cap ?", "L less than 3"]
my_feature_cols = ["R less than 3", "L more than 5", "R more than 5"]
all_features = train_data_encoded[all_feature_cols] # all Features of train Data
core_feature = train_data_encoded[core_feature_cols] # core Features of train Data
my_features = train_data_encoded[my_feature_cols] # my Features of train Data
encoded_train_label = train_data_encoded.label
all_test = test_data_encoded[all_feature_cols]
core_test = test_data_encoded[core_feature_cols]
my_test = test_data_encoded[my_feature_cols]
encoded_test_label = test_data_encoded.label
# Create Decision Tree classifier objects
clf_all = DecisionTreeClassifier()
clf_core = DecisionTreeClassifier()
clf_my = DecisionTreeClassifier()
# Train the Decision Tree classifiers
all_fit = clf_all.fit(all_features, encoded_train_label)
core_fit = clf_core.fit(core_feature, encoded_train_label)
my_fit = clf_my.fit(my_features, encoded_train_label)
# Predict the response for test dataset
all_pred = all_fit.predict(all_test)
# accuracy_score returns a fraction in [0, 1]; scale it for the percent display
print("Accuracy for all features:", 100 * metrics.accuracy_score(encoded_test_label, all_pred), "%")
core_pred = core_fit.predict(core_test)
print("Accuracy for core features:", 100 * metrics.accuracy_score(encoded_test_label, core_pred), "%")
my_pred = my_fit.predict(my_test)
print("Accuracy for my features:", 100 * metrics.accuracy_score(encoded_test_label, my_pred), "%")
# create SBD.test.out csv file
df_pre = pd.DataFrame(all_pred)
test_out_df = pd.DataFrame(test_out, columns=["Word_#", "Word"])
le = LabelEncoder()
le.fit(the_label)
final_results = le.inverse_transform(df_pre[0])
test_out_df["my_prediction"] = final_results
test_out_df.to_csv("SBD.test.out") | NLP/SENTENCE_BOUNDARY_DICTION.py | import itertools # set iterator for next word in the file
import pandas as pd
from sklearn.preprocessing import LabelEncoder # convert categorical variables into numerical variables
from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier
from sklearn import metrics # Import scikit-learn metrics module for accuracy calculation
import sys
# open files
sbd_train_file = sys.argv[1] or "SBD.train"
sbd_test_file = sys.argv[2] or "SBD.test"
data = open(sbd_train_file, "r")
test_data = open(sbd_test_file, "r")
# set iterator for next word in the file
list_cycle = itertools.cycle(data)
list_cycle_test = itertools.cycle(test_data)
data_features = []
test_data_features = []
test_out = []
# extract all features from train data and save it in "list of features " List
for words in data:
word = words.split()
if word[1][-1] == ".":
label = word[2]
left_word = word[1]
list_right_words = next(list_cycle)
right_words = list_right_words.split()
right_word = right_words[1]
data_features.append(
[left_word, right_word, str(left_word[0].isupper()), str(right_word[0].isupper()),
str(len(left_word) < 3),
str(len(right_word) < 3), str(len(left_word) > 5), str(len(right_word) > 5), label])
# extract all features from test data and save it in "test_data_features " List
for words in test_data:
word = words.split()
if word[1][-1] == ".":
label = word[2]
left_word = word[1]
list_right_words = next(list_cycle_test)
right_words = list_right_words.split()
right_word = right_words[1]
test_data_features.append(
[left_word, right_word, str(left_word[0].isupper()), str(right_word[0].isupper()),
str(len(left_word) < 3),
str(len(right_word) < 3), str(len(left_word) > 5), str(len(right_word) > 5), label])
test_out.append([word[0], word[1]])
# place "Data & Test_Data" in pandas DataFrame
col_names = ["L word", "R word", "L cap ?", "R cap ?", "L less than 3", "R less than 3", "L more than 5",
"R more than 5", "label"]
train_data = pd.DataFrame(data_features, columns=col_names)
test_data = pd.DataFrame(test_data_features, columns=col_names)
the_label = test_data.label
# Encoder function to Convert Pandas Categorical Data
def Encoder(df):
columns_to_encode = list(df.select_dtypes(include=['category', 'object']))
le = LabelEncoder()
for feature in columns_to_encode:
try:
df[feature] = le.fit_transform(df[feature])
except:
print('Error encoding ' + feature)
return df
train_data_encoded = Encoder(train_data) # Encode train data set
test_data_encoded = Encoder(test_data) # Encode test data set
all_feature_cols = ["L word", "R word", "L cap ?", "R cap ?", "L less than 3", "R less than 3", "L more than 5",
"R more than 5"]
core_feature_cols = ["L word", "R word", "L cap ?", "R cap ?", "L less than 3"]
my_feature_cols = ["R less than 3", "L more than 5", "R more than 5"]
all_features = train_data_encoded[all_feature_cols] # all Features of train Data
core_feature = train_data_encoded[core_feature_cols] # core Features of train Data
my_features = train_data_encoded[my_feature_cols] # my Features of train Data
encoded_train_label = train_data_encoded.label
all_test = test_data_encoded[all_feature_cols]
core_test = test_data_encoded[core_feature_cols]
my_test = test_data_encoded[my_feature_cols]
encoded_test_label = test_data_encoded.label
# Create Decision Tree classifer object
clf_all = DecisionTreeClassifier()
clf_core = DecisionTreeClassifier()
clf_my = DecisionTreeClassifier()
# Train Decision Tree Classifer
all_fit = clf_all.fit(all_features, encoded_train_label)
core_fit = clf_core.fit(core_feature, encoded_train_label)
my_fit = clf_my.fit(my_features, encoded_train_label)
# Predict the response for test dataset
all_pred = all_fit.predict(all_test)
print("Accuracy for all features:", metrics.accuracy_score(encoded_test_label, all_pred), "%")
core_pred = core_fit.predict(core_test)
print("Accuracy for core features:", metrics.accuracy_score(encoded_test_label, core_pred), "%")
my_pred = my_fit.predict(my_test)
print("Accuracy for my features:", metrics.accuracy_score(encoded_test_label, my_pred), "%")
# create SBD.test.out csv file
df_pre = pd.DataFrame(all_pred)
test_out_df = pd.DataFrame(test_out, columns=["Word_#", "Word"])
le = LabelEncoder()
le.fit(the_label)
final_results = le.inverse_transform(df_pre[0])
test_out_df["my_prediction"] = final_results
test_out_df.to_csv("SBD.test.out") | 0.439747 | 0.472136 |
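# One caveat in the script above: Encoder() fits a fresh LabelEncoder per
# column on train and test independently, so the integer code assigned to a
# given word in "L word" need not agree between the two sets. A sketch of one
# common remedy -- fit each encoder on the union of both sets, then transform
# each; this is an illustration, not part of the original script.
def encode_consistently(train_df, test_df):
    object_columns = train_df.select_dtypes(include=['category', 'object']).columns
    for feature in object_columns:
        le = LabelEncoder()
        le.fit(pd.concat([train_df[feature], test_df[feature]], ignore_index=True))
        train_df[feature] = le.transform(train_df[feature])
        test_df[feature] = le.transform(test_df[feature])
    return train_df, test_df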
import unittest
from setup.settings import *
from numpy.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionMedianTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_function_math_median_scalar(self):
self.assertEqual(dnp.median(0.5), np.median(0.5))
self.assertEqual(dnp.median(1), np.median(1))
self.assertEqual(dnp.median(-1), np.median(-1))
self.assertEqual(dnp.median(0), np.median(0))
self.assertEqual(dnp.isnan(dnp.median(dnp.nan)), True)
self.assertEqual(np.isnan(np.median(np.nan)), True)
def test_function_math_median_list(self):
npa = np.median([1, 8, 27, -27, 0, 5, np.nan])
dnpa = dnp.median([1, 8, 27, -27, 0, 5, dnp.nan])
assert_array_equal(dnpa, npa)
def test_function_math_median_array(self):
npa = np.median(np.array([1, 8, 27, -27, 0, 5, np.nan]))
dnpa = dnp.median(dnp.array([1, 8, 27, -27, 0, 5, dnp.nan]))
assert_array_equal(dnpa, npa)
def test_function_math_median_series(self):
ps = pd.Series([-1, 8, 27, -27, 0, 5, np.nan])
os = orca.Series(ps)
self.assertEqual(dnp.isnan(dnp.median(os)), True)
self.assertEqual(np.isnan(np.median(ps)), True)
ps = pd.Series([-1, 8, 27, -27, 0, 5])
os = orca.Series(ps)
self.assertEqual(dnp.median(os), np.median(ps))
def test_function_math_median_dataframe(self):
pdf = pd.DataFrame({"cola": [-1, 8, 27, -27, 0, 5, np.nan, 1.5, 1.7, 2.0, np.nan],
"colb": [-1, 8, 27, -27, 0, 5, np.nan, 1.5, 1.7, np.nan, 2.0]})
odf = orca.DataFrame(pdf)
self.assertEqual(dnp.isnan(dnp.median(odf)), True)
self.assertEqual(np.isnan(np.median(pdf)), True)
pdf = pd.DataFrame({"cola": [-1, 8, 27, -27, 0, 5, 1.5, 1.7, 2.0],
"colb": [-1, 8, 27, -27, 0, 5, 1.5, 1.7, 2.0]})
odf = orca.DataFrame(pdf)
self.assertEqual(dnp.median(odf), np.median(pdf))
if __name__ == '__main__':
    unittest.main() | tests/numpy_unit_testing/test_function_statistical_median.py | 0.611034 | 0.642713 |
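# The NaN assertions in the tests above rely on np.median propagating NaN
# through the computation. When NaNs should be ignored instead, numpy offers
# nanmedian -- a small illustration:
a = np.array([1, 8, 27, -27, 0, 5, np.nan])
print(np.median(a))     # nan
print(np.nanmedian(a))  # 3.0, the median of the six non-NaN values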
LIST_VIDEO_RESPONSES = [
{
"nextPageToken": "~!!~AI9FV7Tc4k5BiAr1Ckwyu",
"files": [
{
"id": "12JCgxaoHrGvd_Vy5grfCTHr",
"name": "test_video_1.mp4",
"mimeType": "video/mp4",
"parents": ["1lSSPf_kx83O0fcmSA9n4-c3dnB"],
"webContentLink": "https://drive.google.com/uc?id=12JCgxaoHrGvd_Vy5grfCTHr&export=download",
"createdTime": "2021-07-28T00:06:40.439Z",
"modifiedTime": "2021-07-29T16:25:19.375Z",
"md5Checksum": "633410252",
"trashed": False,
},
{
"id": "1Co1ZE7nodTjCqXuyFl10B38",
"name": "test_video_2.mp4",
"mimeType": "video/mp4",
"parents": ["TepPI157C9za"],
"webContentLink": "https://drive.google.com/uc?id=1Co1ZE7nodTjCqXuyFl10B38&export=download",
"createdTime": "2019-08-27T12:51:41.000Z",
"modifiedTime": "2021-07-29T16:25:19.187Z",
"md5Checksum": "3827293107",
"trashed": False,
},
],
},
{
"files": [
{
"id": "Vy5grfCTHr_12JCgxaoHrGvd",
"name": "test_video_1.mp4",
"mimeType": "video/mp4",
"parents": ["1lSSPf_kx83O0fcmSA9n4-c3dnB"],
"webContentLink": "https://drive.google.com/uc?id=Vy5grfCTHr_12JCgxaoHrGvd&export=download",
"createdTime": "2021-07-28T00:06:40.439Z",
"modifiedTime": "2021-07-29T14:25:19.375Z",
"md5Checksum": "633410252",
"trashed": False,
},
{
"id": "XuyFl10B381Co1ZE7nodTjCq",
"name": "test_video_2.mp4",
"mimeType": "video/mp4",
"parents": ["TepPI157C9za"],
"webContentLink": "https://drive.google.com/uc?id=XuyFl10B381Co1ZE7nodTjCq&export=download",
"createdTime": "2020-08-27T12:51:41.000Z",
"modifiedTime": "2021-07-30T12:25:19.187Z",
"md5Checksum": "3827293107",
"trashed": False,
},
]
},
]
LIST_FILE_RESPONSES = [
{
"files": [
{
"id": "Ay5grfCTHr_12JCgxaoHrGve",
"name": "test_image.jpg",
"mimeType": "image/jpeg",
"parents": ["websiteFileFinalFolderId"],
"webContentLink": "https://drive.google.com/uc?id=Ay5grfCTHr_12JCgxaoHrGve&export=download",
"createdTime": "2021-07-28T00:06:40.439Z",
"modifiedTime": "2021-07-29T14:25:19.375Z",
"md5Checksum": "633410252",
"trashed": False,
},
{
"id": "BuyFl10B381Co1ZE7nodTjCr",
"name": "test_video_wrong_folder.mp4",
"mimeType": "video/mp4",
"parents": ["websiteFileFinalFolderId"],
"webContentLink": "https://drive.google.com/uc?id=BuyFl10B381Co1ZE7nodTjCr&export=download",
"createdTime": "2020-08-27T12:51:41.000Z",
"modifiedTime": "2021-07-30T12:25:19.187Z",
"md5Checksum": "3827293107",
"trashed": False,
},
]
},
] | gdrive_sync/conftest.py | 0.380068 | 0.302229 |
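# Note the paging protocol these fixtures encode: the first mocked video
# response carries a "nextPageToken" while the second does not, which is how
# the Drive API signals the final page. A sketch of the consuming loop such
# fixtures exercise; `fetch_page` is a hypothetical stand-in for the client's
# files().list(...).execute() call.
def iter_files(fetch_page):
    token = None
    while True:
        page = fetch_page(page_token=token)
        for file_info in page.get("files", []):
            yield file_info
        token = page.get("nextPageToken")
        if not token:
            break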
from local_file_system import LocalFileSystem
from language_guesser import LanguageGuesser
from support import is_comment
from linear_phase_parser import LinearPhaseParser
def run_study(args):
sentence = args.get('sentence', '')
local_file_system = LocalFileSystem()
local_file_system.initialize(args)
local_file_system.configure_logging()
parser_for = {}
lang_guesser = LanguageGuesser(local_file_system.external_sources["lexicon_file_name"])
for language in lang_guesser.languages:
parser_for[language] = LinearPhaseParser(local_file_system, language)
parser_for[language].initialize()
if not sentence:
sentences_to_parse = [(sentence, group, part_of_conversation)
for (sentence, group, part_of_conversation)
in local_file_system.read_test_corpus()]
else:
sentences_to_parse = [([word.strip() for word in sentence.split()], '1', False)]
sentence_number = 1
for sentence, experimental_group, part_of_conversation in sentences_to_parse:
if not is_comment(sentence):
language = lang_guesser.guess_language(sentence)
local_file_system.print_sentence_to_console(sentence_number, sentence)
parser_for[language].parse(sentence_number, sentence)
local_file_system.save_output(parser_for[language],
sentence_number,
sentence,
experimental_group,
part_of_conversation)
if not part_of_conversation:
parser_for[language].narrow_semantics.global_cognition.end_conversation()
sentence_number = sentence_number + 1
else:
local_file_system.parse_and_analyze_comment(sentence)
local_file_system.write_comment_line(sentence)
# Finish processing
local_file_system.save_surface_vocabulary(parser_for["LANG:EN"].lexicon.surface_vocabulary)
    local_file_system.close_all_output_files() | lpparse/main.py | 0.301979 | 0.10711 |
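# A hedged single-sentence invocation of run_study above. Only the 'sentence'
# key is read directly in this file; LocalFileSystem.initialize(args)
# presumably consumes further keys (corpus paths, logging options), so a real
# call may need more entries. The example sentence is arbitrary.
if __name__ == '__main__':
    run_study({'sentence': 'John admires Mary'})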
from panda3d.core import Vec3
from GameObject import *
from Item import Item
from Trigger import Trigger
from Spawner import Spawner
import SpecificItems
import SpecificEnemies
import SpecificMiscObjects
class Level():
def __init__(self, levelFile):
self.levelFile = levelFile
self.geometry = loader.loadModel("Levels/" + levelFile)
self.geometry.reparentTo(render)
        self.scriptObj = None
        try:
            # __import__("Scripts.X") returns the Scripts package; the freshly
            # imported submodule is then available as an attribute on it.
            moduleObj = __import__("Scripts.{0}".format(levelFile))
            if moduleObj is not None:
                self.scriptObj = getattr(moduleObj, levelFile)
        except ImportError as e:
            print("Error importing script file Scripts." + levelFile)
            print(e)
self.enemies = []
self.deadEnemies = []
self.particleSystems = []
self.blasts = []
self.projectiles = []
self.passiveObjects = []
self.items = []
self.triggers = []
self.spawners = {}
self.spawnerGroups = {}
self.registeredSpawnables = {}
if hasattr(SpecificEnemies, "spawnableDict"):
for name, data in SpecificEnemies.spawnableDict.items():
self.registeredSpawnables[name] = (data, True)
if hasattr(SpecificItems, "spawnableDict"):
for name, data in SpecificItems.spawnableDict.items():
self.registeredSpawnables[name] = (data, False)
if hasattr(SpecificMiscObjects, "spawnableDict"):
for name, data in SpecificMiscObjects.spawnableDict.items():
self.registeredSpawnables[name] = (data, False)
self.geometryInterpreters = {
"spawner" : self.buildSpawners,
"trigger" : self.buildTriggers
}
if hasattr(SpecificEnemies, "buildDict"):
for name, callback in SpecificEnemies.buildDict.items():
self.geometryInterpreters[name] = callback
if hasattr(SpecificItems, "buildDict"):
for name, callback in SpecificItems.buildDict.items():
self.geometryInterpreters[name] = callback
if hasattr(SpecificMiscObjects, "buildDict"):
for name, callback in SpecificMiscObjects.buildDict.items():
self.geometryInterpreters[name] = callback
self.spawnersToActivate = []
self.interpretGeometry()
for weapon in base.player.weapons:
self.particleSystems += weapon.particleSystems
for spawnerName in self.spawnersToActivate:
self.activateSpawner(spawnerName)
self.spawnersToActivate = []
def interpretGeometry(self):
for key, callback in self.geometryInterpreters.items():
nps = self.geometry.findAllMatches("**/={0}".format(key))
callback(self, nps)
def buildSpawners(self, level, spawnerNPs):
for np in spawnerNPs:
id = np.getTag("id")
spawnerIsActive = np.getTag("active") == "True"
spawnerGroupName = np.getTag("groupName")
pos = np.getPos(render)
h = np.getH(render)
spawnerName = np.getName()
np.removeNode()
spawnableData, isEnemy = self.registeredSpawnables[id]
spawner = Spawner(spawnableData, pos, h, isEnemy)
self.spawners[spawnerName] = spawner
if spawnerGroupName is not None and len(spawnerGroupName) > 0:
if spawnerGroupName not in self.spawnerGroups:
self.spawnerGroups[spawnerGroupName] = []
self.spawnerGroups[spawnerGroupName].append(spawner)
if spawnerIsActive:
self.spawnersToActivate.append(spawnerName)
def activateSpawner(self, spawnerName):
spawner = self.spawners.get(spawnerName, None)
if spawner is not None:
self.activateSpawnerInternal(spawner)
def activateSpawnerInternal(self, spawner):
if not spawner.isReady:
return
obj = spawner.spawnObj
spawner.spawnObj = None
spawner.isReady = False
if spawner.objIsEnemy:
self.enemies.append(obj)
obj.actor.play("spawn")
else:
if obj.auraName is not None:
auraPath = "Models/Items/{0}".format(obj.auraName)
else:
auraPath = None
item = Item(obj.root.getPos() + Vec3(0, 0, 1), auraPath, obj)
self.items.append(item)
obj.root.wrtReparentTo(render)
def activateSpawnerGroup(self, groupName):
spawnerList = self.spawnerGroups.get(groupName, None)
if spawnerList is not None:
for spawner in spawnerList:
self.activateSpawnerInternal(spawner)
def buildTriggers(self, level, triggerNPs):
for np in triggerNPs:
callbackName = np.getTag("callback")
onlyOnce = np.getTag("onlyOnce") == "True"
active = np.getTag("active") == "True"
trigger = Trigger(callbackName, np, onlyOnce, active)
self.triggers.append(trigger)
def triggerActivated(self, trigger):
if hasattr(self.scriptObj, trigger.callbackName):
getattr(self.scriptObj, trigger.callbackName)(self)
if trigger.onlyOnce:
trigger.active = False
def addBlast(self, model, minSize, maxSize, duration, pos):
blast = Blast(model, minSize, maxSize, duration)
blast.model.reparentTo(render)
blast.model.setPos(pos)
self.blasts.append(blast)
blast.update(0)
def update(self, player, keyMap, dt):
if player is not None:
if player.health > 0:
# Player update
player.update(keyMap, dt)
# Enemy update
[enemy.update(player, dt) for enemy in self.enemies]
newlyDeadEnemies = [enemy for enemy in self.enemies if enemy.health <= 0]
self.enemies = [enemy for enemy in self.enemies if enemy.health > 0]
for enemy in newlyDeadEnemies:
#enemy.collider.removeNode()
enemy.actor.play("die")
if enemy.inControl:
enemy.velocity.set(0, 0, 0)
enemy.walking = False
self.deadEnemies += newlyDeadEnemies
enemiesAnimatingDeaths = []
for enemy in self.deadEnemies:
GameObject.update(enemy, dt)
deathAnimControl = enemy.actor.getAnimControl("die")
if deathAnimControl is None or not deathAnimControl.isPlaying():
enemy.cleanup()
else:
enemiesAnimatingDeaths.append(enemy)
self.deadEnemies = enemiesAnimatingDeaths
# Projectile update
[proj.update(dt) for proj in self.projectiles]
[proj.cleanup() for proj in self.projectiles if proj.maxHealth > 0 and proj.health <= 0]
self.projectiles = [proj for proj in self.projectiles if proj.maxHealth <= 0 or proj.health > 0]
# Passive object update
[obj.update(dt) for obj in self.passiveObjects]
[blast.update(dt) for blast in self.blasts]
[blast.cleanup() for blast in self.blasts if blast.timer <= 0]
self.blasts = [blast for blast in self.blasts if blast.timer > 0]
[system.update(dt) for system in self.particleSystems]
def cleanup(self):
for blast in self.blasts:
blast.cleanup()
self.blasts = []
for trigger in self.triggers:
trigger.cleanup()
self.triggers = []
for spawner in self.spawners.values():
spawner.cleanup()
self.spawners = {}
self.spawnerGroups = {}
for enemy in self.enemies:
enemy.cleanup()
self.enemies = []
for enemy in self.deadEnemies:
enemy.cleanup()
self.deadEnemies = []
for passive in self.passiveObjects:
passive.cleanup()
self.passiveObjects = []
for projectile in self.projectiles:
projectile.cleanup()
        self.projectiles = [] | Level.py | 0.324342 | 0.113138 |
import sqlite3 as db
from lib.logger import logger as log
class SQLite:
def __init__(self, dbf):
self.conn = db.connect(dbf)
self.cursor = self.conn.cursor()
def fetchall(self, sql, data=[]):
result = None
if self.cursor.execute(sql, list(data)):
result = self.cursor.fetchall()
if len(result) > 0:
return [dict(zip([j[0] for j in self.cursor.description], i)) for i in result]
return result
def fetchone(self, sql, data=[]):
result = None
if self.cursor.execute(sql, list(data)):
result = self.cursor.fetchone()
            if result is not None:
return dict(zip([j[0] for j in self.cursor.description], result))
return result
    def getlist(self, tableName, columns="*", condition="", orders="", limits=""):
        sql = "SELECT "+columns+" FROM " + tableName + " WHERE 1=1"
        _data = []
        if isinstance(condition, dict):
            for i in condition.keys():
                sql += " AND "+i+"=?"
            _data = list(condition.values())
        else:
            sql += condition
        sql += " order by "+orders if orders else ""
        sql += " limit "+limits if limits else ""
        result = self.fetchall(sql, _data)
        return [] if result is None else result
    def getone(self, tableName, columns="*", condition="", orders="", limits=""):
        sql = "SELECT "+columns+" FROM " + tableName + " WHERE 1=1"
        _data = []
        if isinstance(condition, dict):
            for i in condition.keys():
                sql += " AND "+i+"=?"
            _data = list(condition.values())
        else:
            sql += condition
        sql += " order by "+orders if orders else ""
        sql += " limit "+limits if limits else ""
        return self.fetchone(sql, _data)
def insert(self, tableName, data):
sql = "INSERT INTO " + tableName + " ("+",".join(data.keys())+") VALUES ("+("?,"*len(data))[:-1]+")"
status = self.execute(sql, data.values())
# status = self.cursor.execute(sql, data.values())
# self.conn.commit()
return status
def delete(self, tableName, condition):
sql = "DELETE FROM " + tableName + " WHERE 1=1"
_data = []
if isinstance(condition, dict):
for i in condition.keys():
sql += " AND "+i+"=?"
_data = condition.values()
else:
sql += condition
status = self.execute(sql, _data)
# status = self.cursor.execute(sql, _data)
# self.conn.commit()
return status
def update(self, tableName, data, condition):
sql = "UPDATE " + tableName + " SET "
_data = []
#update data
if isinstance(data, dict):
for i in data.keys():
sql += i+"=?,"
sql = sql[:-1]
_data = list(data.values())
else:
sql = sql + data
#condition
sql += " WHERE 1=1 "
if isinstance(condition, dict):
for i in condition.keys():
sql += " AND "+i+"=?"
_data += list(condition.values())
else:
sql = sql + condition
status = self.execute(sql, _data)
# status = self.cursor.execute(sql, _data)
# self.conn.commit()
return status
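    # Example usage (a sketch only; "test.db" and the "users" table with
    # "name"/"age" columns are hypothetical, not part of this module):
    #   db = SQLite("test.db")
    #   db.insert("users", {"name": "alice", "age": 30})
    #   rows = db.getlist("users", condition={"age": 30})
    #   one = db.getone("users", condition={"name": "alice"})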
def execute(self, sql, data=[]):
data = tuple(data)
if data:
            log.debug('Execute sql: %s with data %s' % (sql, data))
        else:
            log.debug('Execute sql: %s' % sql)
status = self.cursor.execute(sql, list(data))
self.conn.commit()
return status | commander/lib/sqlite.py | 0.119177 | 0.179674 |
import random
def choose_first():
if random.randint(0, 1) == 0:
return 'Player 2'
else:
return 'Player 1'
def change_player(player):
if player=='Player 1':
return 'Player 2'
else:
return 'Player 1'
def get_board():
    b = []
    for x in range(0, 25):
        ele = input()
        b.append(ele)  # adding the element
        if int(b[x]) < 10:
            b[x] = ' ' + b[x]
    return b
def board_input(board):
    i = ''
    while i not in board:
        i = input('enter a no 1 to 25\n')
        if len(i) < 2:
            i = ' ' + i
    return i
def display_board(board):
print(' | | | |')
print(' ' + board[20] + ' | ' + board[21] + ' | ' + board[22] + ' | ' + board[23] + ' | ' + board[24])
print(' | | | |')
print('----------------------')
print(' | | | |')
print(' ' + board[15] + ' | ' + board[16] + ' | ' + board[17] + ' | ' + board[18] + ' | ' + board[19])
print(' | | | |')
print('----------------------')
print(' | | | |')
print(' ' + board[10] + ' | ' + board[11] + ' | ' + board[12] + ' | ' + board[13] + ' | ' + board[14])
print(' | | | |')
print('----------------------')
print(' ' + board[5] + ' | ' + board[6] + ' | ' + board[7] + ' | ' + board[8] + ' | ' + board[9])
print(' | | | |')
print('----------------------')
print(' | | | |')
print(' ' + board[0] + ' | ' + board[1] + ' | ' + board[2] + ' | ' + board[3] + ' | ' + board[4])
print(' | | | |')
def win(board):
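    # Bingo is won once five complete lines are crossed: the twelve checks
    # below cover the five rows, five columns and two diagonals of the 5x5
    # board, and each fully crossed line adds one to the count.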
mark='X'
c=0
    if(board[0] == mark and board[1] == mark and board[2] == mark and board[3] == mark and board[4] == mark):
        c+=1
    if(board[5] == mark and board[6] == mark and board[7] == mark and board[8] == mark and board[9] == mark):
        c+=1
    if(board[10] == mark and board[14] == mark and board[11] == mark and board[12] == mark and board[13] == mark):
        c+=1
    if(board[15] == mark and board[16] == mark and board[17] == mark and board[18] == mark and board[19] == mark):
        c+=1
    if(board[21] == mark and board[20] == mark and board[23] == mark and board[24] == mark and board[22] == mark):
        c+=1
    if(board[20] == mark and board[10] == mark and board[5] == mark and board[0] == mark and board[15] == mark):
        c+=1
    if(board[21] == mark and board[16] == mark and board[11] == mark and board[6] == mark and board[1] == mark):
        c+=1
    if(board[12] == mark and board[22] == mark and board[17] == mark and board[7] == mark and board[2] == mark):
        c+=1
    if(board[18] == mark and board[13] == mark and board[23] == mark and board[8] == mark and board[3] == mark):
        c+=1
    if(board[14] == mark and board[19] == mark and board[9] == mark and board[24] == mark and board[4] == mark):
        c+=1
    if(board[12] == mark and board[20] == mark and board[16] == mark and board[8] == mark and board[4] == mark):
        c+=1
    if(board[18] == mark and board[12] == mark and board[24] == mark and board[6] == mark and board[0] == mark):
        c+=1
if(c>=5):
return True
else:
return False
def crossing(b1, b2, ele):
    ind1 = b1.index(ele)
    b1[ind1] = 'X'
    ind2 = b2.index(ele)
    b2[ind2] = 'X'
print('welcome to bingo')
t=input('want to play?(enter y/Y to play)')
if t=='y' or t== 'Y':
print("welcome players\n\n\n ")
player=choose_first()
b1=get_board()
player=change_player(player)
b2=get_board()
    for turn in range(0, 25):
        player = change_player(player)
display_board(b1)
print(player)
i=board_input(b1)
crossing(b1,b2,i)
        if win(b1):
print(f'{player} won')
display_board(b1)
print(f"{player}'s board")
print('\n'*5)
            player = change_player(player)
display_board(b2)
print(f"{player}'s board")
break
        elif win(b2):
            player = change_player(player)
display_board(b2)
print(f"{player}'s board")
            player = change_player(player)
display_board(b1)
print(f"{player}'s board")
break
else:
print('\n'*50)
        player = change_player(player)
display_board(b1)
print(f"{player}'s board")
i=board_input(b1)
crossing(b1,b2,i)
        if win(b2):
print(f'{player} won')
display_board(b1)
print(f"{player}'s board")
print('\n'*5)
            player = change_player(player)
display_board(b2)
print(f"{player}'s board")
break
        elif win(b1):
            player = change_player(player)
display_board(b2)
print(f"{player}'s board")
            player = change_player(player)
display_board(b1)
print(f"{player}'s board")
break
else:
print('\n'*50) | bingo.py | 0.045649 | 0.336195 |
import requests
import datetime
from config import DefaultConfig
from database import get_movies, get_recommended
from helpers.keywords import keywords_unique
from helpers.genres import genres
CONFIG = DefaultConfig()
imgURL = 'https://image.tmdb.org/t/p/'
posterURL = f'{imgURL}w342'
backdropURL = f'{imgURL}w300'
MOVIE_CARD_INDEXES = {'title',
'backdrop_path',
'overview',
'release_date',
'vote_average'}
IMG_TYPE = ((1, 'backdrops'), (2, 'posters'))
LANGUAGES = ((1, 'en-US'), (2, 'pt-BR'),)
BANDIT_N_LIST = CONFIG.BANDIT_OPTION
def movie_card(movie):
if movie:
movie_data = get_movie_card_data(movie)
movie_message = f"\U0001F3AC <b>{movie_data.get('title')} ({movie_data.get('release_date')})</b>" \
f"\n\n{movie_data.get('overview')} " \
f"\n\n\U0001F517<a href='{movie_data.get('detailsUrl')}'><i>Acessar Página do Filme</i></a>" \
f"\n\n\U0001F517<a href='{movie_data.get('posterUrl')}'><i>Acessar Poster</i></a>"
if movie_data.get('trailerUrl'):
movie_message += f"\n\U0001F517<a href='{movie_data.get('trailerUrl')}'><i>Acessar Trailer</i></a>"
return movie_message
else:
return "Ops. Desculpe. Ocorreu um erro ao retorar o filme!"
def get_movie_card_data(movie, extra=True):
movie_data = {}
movie = update_movie_info(movie)
for key in MOVIE_CARD_INDEXES:
movie_data.update({key: movie.get(key, '')})
if extra:
movie_data.update(get_movie_card_extra(movie))
return movie_data
def update_movie_info(movie):
movie = update_movie_image_path(movie)
# movie = convert_movie_dates(movie)
return movie
def get_movie_card_extra(movie):
movie_id = movie.get('_id')
backdrop = movie.get("backdrop_path")
poster = movie.get("poster_path")
trailer = get_trailer_url(movie_id)
extra_data = {'detailsUrl': f"https://www.themoviedb.org/movie/{movie_id}?language={CONFIG.TMDB_LANGUAGE}"}
if trailer:
extra_data['trailerUrl'] = trailer
if backdrop:
extra_data['backdropUrl'] = backdrop
if poster:
extra_data['posterUrl'] = poster
return extra_data
def update_movie_image_path(movie):
movie['backdrop_path'] = f"{backdropURL}{movie['backdrop_path']}"
movie['poster_path'] = f"{posterURL}{movie['poster_path']}"
return movie
def get_resource(movie_num, url='', page=1):
url = f"{CONFIG.TMDB_URL}movie/{str(movie_num)}{url}?api_key={CONFIG.TMDB_KEY}" \
f"&page={str(page)}&language={CONFIG.TMDB_LANGUAGE}"
response = requests.get(url)
return response.json()
def get_movies_resource(movie_num, url='', page=1):
response = get_resource(movie_num, url, page)
if 'results' in response:
response = response['results']
return response
def get_one_movie_resource_pt(movie_num):
movie = get_resource(movie_num)
return movie
def get_one_movie_resource_en(movie_num, page=1):
url = f"{CONFIG.TMDB_URL}movie/{str(movie_num)}?api_key={CONFIG.TMDB_KEY}" \
f"&page={str(page)}&language=en-US"
response = requests.get(url)
return response.json()
def get_movie_keywords(tmdb_id):
keywords = []
url = f"{CONFIG.TMDB_URL}movie/{str(tmdb_id)}/keywords?api_key={CONFIG.TMDB_KEY}"
response = requests.get(url).json()
for keyword in response.get('keywords'):
keyword_id = int(keyword['id'])
if keyword_id in keywords_unique:
keywords.append(f'k{keyword_id}')
return '|'.join(keywords)
def get_similar_resources(movie_id):
return get_movies_resource(movie_id, '/similar')
def get_top_rated_resources(page=1):
return get_movies_resource('', 'top_rated', page)
def get_popular_resources(page=1):
return get_movies_resource('', 'popular', page)
def get_now_playing_resources(page=1):
return get_movies_resource('', 'now_playing', page)
def get_upcoming_resources(page=1):
return get_movies_resource('', 'upcoming', page)
def get_trailer_url(movie_id):
url = f"{CONFIG.TMDB_URL}movie/{movie_id}/videos?api_key={CONFIG.TMDB_KEY}&language={CONFIG.TMDB_LANGUAGE}"
response = requests.get(url)
trailer_url = None
if response.status_code == 200 and len(response.json()['results']) > 0:
key = response.json()['results'][0].get('key')
trailer_url = f"https://www.youtube.com/watch?v={key}"
return trailer_url
def convert_movie_dates(movie):
if len(str(movie['release_date'])) > 4:
date_time_obj = datetime.datetime.strptime(movie['release_date'], '%Y-%m-%d')
movie['release_date'] = date_time_obj.strftime('%Y')
return movie
def search(search_type, query, page=1):
url = f"{CONFIG.TMDB_URL}search/{search_type}?api_key={CONFIG.TMDB_KEY}&page={str(page)}" \
f"&language={CONFIG.TMDB_LANGUAGE}" \
f"&query={query}"
response = requests.get(url).json()['results']
# TODO: Handle errors
return response
def search_movie(query, page=1):
return search('movie', query, page)
def search_keyword(query, page=1):
return search('keyword', query, page)
def discover(genres, keywords, page=1):
# genre and keyword IDs, comma separated
url = f"{CONFIG.TMDB_URL}discover/movie?api_key={CONFIG.TMDB_KEY}&page={str(page)}" \
f"&language={CONFIG.TMDB_LANGUAGE}&sort_by=popularity.desc" \
f"&with_genres={genres}"
url += f"&with_keywords={keywords}" if keywords else ""
response = requests.get(url).json()['results']
# TODO: Handle errors
return response
def get_candidates(telegram_id, genre, keyword):
candidates = {}
selected = get_recommended(telegram_id)
genre_name = genres.get(int(genre))
movies = get_movies(genre_name, keyword)
    for movie in movies:
        movie_id = movie.get('_id')
        if movie_id not in selected and len(candidates) < 100:
            candidates[movie_id] = movie
    return candidates
def get_code(resp):
return resp.split('_', 1)[1] | helpers/movie_helper.py | 0.230833 | 0.091301 |
import logging
from neon.backends.backend import Block
from neon.backends.gpu import GPU
from nervanagpu import NervanaGPU, GPUTensor
import pycuda.driver as drv
import numpy as np
from functools import wraps
import atexit
logger = logging.getLogger(__name__)
def replicate(method):
def decorate(cls):
@wraps(cls)
def func(self, *args, **kwargs):
if self.ng.block is not None:
self.call_stack.append((method, args, kwargs))
return
else:
tsrlist = []
for idx, ctx in enumerate(getattr(self, 'ctxs')):
ctx.push()
self.ng.stream = self.strms[idx]
myargs = [a.tlist[idx] if isinstance(
a, MGPUTensor) else a for a in args]
mykwargs = {k: v.tlist[idx] if isinstance(
v, MGPUTensor) else v for k, v in kwargs.iteritems()}
tsrlist.append(
getattr(super(cls, self), method)(*myargs, **mykwargs))
self.ng.stream = None
ctx.pop()
return MGPUTensor(tsrlist) if tsrlist[0] is not None else None
setattr(cls, method, func)
return cls
return decorate
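# replicate() fans one logical backend call out across every device: the
# wrapped method runs once per context, swapping each MGPUTensor argument for
# its per-device fragment, and the per-device results are gathered back into
# a single MGPUTensor. While a Block.update stack is open, calls are queued
# on call_stack instead and replayed later by end_stack().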
def passthru(method):
def decorate(cls):
@wraps(cls)
def func(self, *args, **kwargs):
tsrlist = []
for idx, (tsr, ctx) in enumerate(zip(getattr(self, '_tensorlist'),
getattr(self, 'ctxs'))):
ctx.push()
myargs = [a.tlist[idx] if isinstance(
a, MGPUTensor) else a for a in args]
mykwargs = {k: v.tlist[idx] if isinstance(
v, MGPUTensor) else v for k, v in kwargs.iteritems()}
tsrlist.append(getattr(tsr, method)(*myargs, **mykwargs))
ctx.pop()
if tsrlist[0] is not None:
return MGPUTensor(tsrlist, ptype=self.ptype)
setattr(cls, method, func)
return cls
return decorate
@passthru('_assign')
@passthru('fill')
@passthru('reshape')
@passthru('copy_from')
@passthru('__getitem__')
@passthru('__add__')
@passthru('__sub__')
@passthru('__mul__')
@passthru('__div__')
@passthru('__truediv__')
@passthru('__pow__')
@passthru('__radd__')
@passthru('__rsub__')
@passthru('__rmul__')
@passthru('__rdiv__')
@passthru('__ne__')
@passthru('__eq__')
class MGPUTensor(object):
ctxs = None
num_dev = 0
def __init__(self, tensorlist, ptype='fragment'):
self._tensorlist = tensorlist
self.ptype = ptype
@property
def shape(self):
return self._tensorlist[0].shape
@property
def dtype(self):
return self._tensorlist[0].dtype
@property
def size(self):
return self._tensorlist[0].size
@property
def is_contiguous(self):
return self._tensorlist[0].is_contiguous
@property
def tlist(self):
return self._tensorlist
@property
def ptr(self):
return self._tensorlist[0].gpudata.__int__()
def __setitem__(self, index, value):
if self.ctxs is None:
raise ValueError("Contexts not defined")
for idx, (tsr, ctx) in enumerate(zip(getattr(self, '_tensorlist'),
getattr(self, 'ctxs'))):
ctx.push()
if isinstance(value, MGPUTensor):
tsr.__setitem__(index, value._tensorlist[idx])
else:
tsr.__setitem__(index, value)
ctx.pop()
def asnumpyarray(self):
if self.ptype == 'replica':
self.ctxs[0].push()
rval = self._tensorlist[0].get()
self.ctxs[0].pop()
return rval
else:
rval = []
for subtensor, ctx in zip(self.tlist, self.ctxs):
ctx.push()
npv = subtensor.get()
rval.append(npv)
ctx.pop()
if self.ptype == 'vfragment':
return np.vstack(rval)
else:
return np.hstack(rval)
@property
def T(self): # noqa
"""
return a transposed view
"""
tsrlist = []
for tsr in self._tensorlist:
tsrlist.append(GPUTensor(backend=tsr.backend,
shape=tsr.shape[::-1], dtype=tsr.dtype,
allocator=tsr.allocator, base=tsr,
gpudata=tsr.gpudata,
strides=tsr.strides[::-1],
is_trans=(not tsr.is_trans),
name=tsr.name, rounding=tsr.rounding))
return self.__class__(tsrlist)
@replicate('fprop_conv')
@replicate('convolution')
@replicate('bprop_conv')
@replicate('update_conv')
@replicate('fprop_pool')
@replicate('bprop_pool')
@replicate('logistic')
@replicate('rectlin')
@replicate('rectlin_derivative')
@replicate('rectleaky')
@replicate('rectleaky_derivative')
@replicate('sum')
@replicate('mean')
@replicate('min')
@replicate('max')
@replicate('variance')
@replicate('fabs')
@replicate('sqrt')
@replicate('zeros')
@replicate('ones')
@replicate('empty')
@replicate('array')
@replicate('add')
@replicate('subtract')
@replicate('multiply')
@replicate('divide')
@replicate('greater')
@replicate('equal')
@replicate('not_equal')
@replicate('clip')
@replicate('log')
@replicate('tanh')
@replicate('argmax')
@replicate('softmax')
@replicate('softmax_gradient')
@replicate('make_binary_mask')
@replicate('gdm_compound')
@replicate('gdmwd_compound')
@replicate('ada_update')
@replicate('crossent')
@replicate('transpose')
@replicate('logistic_compound')
@replicate('fprop_bn_compound')
@replicate('bprop_bn_compound')
class MGPU(GPU):
default_dtype = np.float32
num_dev = 1
is_dist = True
def __init__(self, rng_seed, stochastic_round=False, device_id=0,
num_dev=2):
drv.init()
self.num_dev = num_dev
if device_id == 0:
self.dev_list = range(num_dev)
else:
self.dev_list = device_id
assert len(self.dev_list) == self.num_dev
assert self.num_dev <= drv.Device.count()
self.ctxs = []
self.devs = []
self._strms = []
self._redstrms = []
self._events = []
self._redevents = []
self.async = True
self._nostrms = [None for i in self.dev_list]
for i in self.dev_list:
self.devs.append(drv.Device(i))
for dev in self.devs:
self.ctxs.append(
dev.make_context(drv.ctx_flags.SCHED_BLOCKING_SYNC))
self._strms.append(drv.Stream())
self._redstrms.append(drv.Stream())
self._events.append(drv.Event())
self._redevents.append(drv.Event())
drv.Context.pop()
self.ctxs[0].push()
atexit.register(drv.Context.pop)
MGPUTensor.ctxs = self.ctxs
MGPUTensor.num_dev = num_dev
self.ng = NervanaGPU(stochastic_round=stochastic_round)
logger.info("Initialized %d device NervanaGPU, stochastic_round=%s",
num_dev, stochastic_round)
self.ng.block = None
self.rng_seed = rng_seed
self.rng_init()
# Setup the pairwise contexts
# TODO clean up this code to avoid indexing
for dev1, ctx1 in zip(self.devs, self.ctxs):
ctx1.push()
for dev2, ctx2 in zip(self.devs, self.ctxs):
if dev1 == dev2:
continue
if dev1.can_access_peer(dev2):
ctx1.enable_peer_access(ctx2)
else:
                    print('Cannot enable peer access between '
                          '{} and {}'.format(dev1, dev2))
ctx1.pop()
def make_events(self):
evtlist = []
for ctx in self.ctxs:
ctx.push()
evtlist.append(drv.Event())
ctx.pop()
return evtlist
    # These definitions are for performing grouped context commands.
    # This is experimental; the _stack handling should be removed for
    # actual usage.
def begin_stack(self, block, identifier):
if block == Block.update:
self.ng.block = Block.update
self.call_stack = []
else:
pass
def end_stack(self, block, identifier):
if block == Block.update:
self.ng.block = None
for idx, ctx in enumerate(self.ctxs):
ctx.push()
self.ng.stream = self.strms[idx]
for method, args, kwargs in self.call_stack:
myargs = [a._tensorlist[idx] if isinstance(
a, MGPUTensor) else a for a in args]
mykwargs = {k: v._tensorlist[idx] if isinstance(
v, MGPUTensor) else v for k, v in kwargs.iteritems()}
getattr(super(MGPU, self), method)(*myargs, **mykwargs)
self.ng.stream = None
ctx.pop()
self.call_stack = None
else:
pass
@property
def strms(self):
return self._strms if self.async else self._nostrms
@property
def redstrms(self):
return self._redstrms if self.async else self._nostrms
def uniform(self, low=0.0, high=1.0, size=1, dtype=default_dtype,
name=None, persist_values=True, ptype='replica'):
"""
        Generate a numpy random array and convert it to an MGPUTensor.
        dtype must be a concrete numpy dtype; passing dtype=None breaks
        the astype conversion below.
"""
assert len(size) == 2
result = self.empty(size, dtype=dtype, persist_values=persist_values)
result.ptype = ptype
beshape = size if ptype == 'replica' else (self.num_dev * size[0],
size[1])
ary = np.random.uniform(low, high, beshape).astype(dtype)
self.set(result, ary)
return result
def normal(self, loc=0.0, scale=1.0, size=1, dtype=default_dtype,
name=None, persist_values=True, ptype='replica'):
"""
Gaussian/Normal random number sample generation
"""
assert len(size) == 2
result = self.empty(size, dtype=dtype, persist_values=persist_values)
result.ptype = ptype
beshape = size if ptype == 'replica' else (self.num_dev * size[0],
size[1])
ary = np.random.normal(loc, scale, beshape).astype(dtype)
self.set(result, ary)
return result
def synchronize(self):
if not self.async:
return
for s in self.strms:
s.synchronize()
def redsynchronize(self):
if not self.async:
return
for s in self.redstrms:
s.synchronize()
def allocate_fragment(self, shape, dtype=default_dtype,
persist_values=True):
# TODO: set ptype to be fragment in this case ??
return self.empty((shape[0], shape[1] / self.num_dev), dtype,
persist_values=persist_values)
def zeros_like(self, ary, dtype=default_dtype, persist_values=True,
name=None):
result = self.zeros(ary.shape, dtype=dtype,
persist_values=persist_values)
result.ptype = ary.ptype
return result
def empty_like(self, ary, dtype=default_dtype, persist_values=True,
name=None):
result = self.empty(ary.shape, dtype=dtype,
persist_values=persist_values, name=name)
result.ptype = ary.ptype
return result
def set(self, tensor, data):
assert isinstance(tensor, MGPUTensor)
if tensor.ptype == 'replica':
for dest, strm, ctx in zip(tensor.tlist, self.strms, self.ctxs):
ctx.push()
drv.memcpy_htod_async(dest.ptr, data, strm)
ctx.pop()
# tensor.copy_from(data)
else:
self.scatter(data, tensor)
def scatter(self, hbuf, dbuf):
'''
scatters the array data in hbuf to the mgpu tensor
assumes that dbuf is a M x N and hbuf is M x (Nxk) where k is the
number of replicas
also assumes that dtype of hbuf and dbuf are the same
'''
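        # Sizing sketch: with num_dev = 2 and dbuf holding M*N elements per
        # device, hbuf's first M*N elements (flat, row-major order) go to
        # device 0 and the next M*N go to device 1; the split is by flat
        # element count, not by column blocks.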
assert hbuf.size == dbuf.size * dbuf.num_dev
assert isinstance(dbuf, MGPUTensor)
assert hbuf.dtype == dbuf.dtype
ndata = dbuf.size
starts = [i * ndata for i in range(self.num_dev)]
for dest, strm, ctx, doff in zip(dbuf.tlist, self.strms, self.ctxs,
starts):
src = hbuf.reshape((hbuf.size))[doff:(doff + ndata)]
ctx.push()
drv.memcpy_htod_async(dest.ptr, src, strm)
ctx.pop()
self.synchronize()
def fprop_fc(self, out, inputs, weights, layer=None):
"""
In this case, the weights are shards, the acts are replicas
ubuf should be of size nout/num_dev x mbsz
"""
ubuf = layer.mempool[0]
assert ubuf.shape == (weights.shape[0], inputs.shape[1])
if layer.use_biases:
biases = layer.biases.tlist
else:
biases = [None for i in range(self.num_dev)]
for dbuf, ibuf, wt, bs, strm, ctx in zip(ubuf.tlist, inputs.tlist,
weights.tlist, biases,
self.strms, self.ctxs):
ctx.push()
self.ng.stream = strm
self.ng.dot(wt, ibuf, dbuf)
if layer.use_biases:
self.ng.add(dbuf, bs, out=dbuf)
ctx.pop()
# Note, should be safe not to sync because each fragment is computed
# on the same stream that originates the copy
# self.synchronize()
self.fragment_to_replica(ubuf, out)
def bprop_fc(self, out, weights, deltas, layer=None):
"""
Backward propagate the error through a fully connected network layer.
Arguments:
out (GPUTensor): Where to store the backward propagated errors.
weights (GPUTensor): The weight coefficient values for this layer.
deltas (GPUTensor): The error values for this layer
layer (Layer): The layer object.
"""
ubuf = layer.mempool[1]
wtsz = weights.shape[0]
starts = [i * wtsz for i in range(self.num_dev)]
assert out.shape == (weights.shape[1], deltas.shape[1])
assert ubuf.shape == out.shape
for dbuf, ibuf, wt, strm, ctx, off in zip(out.tlist, deltas.tlist,
weights.tlist, self.strms,
self.ctxs, starts):
ctx.push()
self.ng.stream = strm
self.ng.dot(wt.T, ibuf[off:(off + wtsz)], dbuf)
ctx.pop()
# Note, should be safe not to sync because each fragment is computed
# on the same stream that originates the copy
self.synchronize()
self.reduce(out, ubuf)
def update_fc(self, out, inputs, deltas, layer=None):
wtsz = out.shape[0]
starts = [i * wtsz for i in range(self.num_dev)]
for obuf, dbuf, ibuf, strm, ctx, off in zip(out.tlist, deltas.tlist,
inputs.tlist, self.strms,
self.ctxs, starts):
ctx.push()
self.ng.stream = strm
self.ng.dot(dbuf[off:(off + wtsz)], ibuf.T, obuf)
ctx.pop()
# self.synchronize()
def update_fc_bias(self, err, out):
"""
Compute the updated bias gradient for a fully connected network layer.
Arguments:
out (GPUTensor): Where to store the updated gradient value.
err (GPUTensor): backpropagated error
"""
wtsz = out.shape[0]
starts = [i * wtsz for i in range(self.num_dev)]
for ebuf, obuf, strm, ctx, off in zip(err.tlist, out.tlist, self.strms,
self.ctxs, starts):
ctx.push()
self.ng.stream = strm
self.ng.sum(ebuf[off:(off + wtsz)], axis=1, out=obuf)
ctx.pop()
def add_fc_bias(self, inputs, bias):
"""
This is a no-op since we absorb the bias add into the fprop_fc call
"""
pass
def reduce_tensor(self, ary, async=True):
'''
        Reduce a one-element tensor to a host scalar: a replica is read back
        from device 0; fragments are copied back from every device and summed.
'''
assert ary.size == 1
if ary.ptype == 'replica':
self.ctxs[0].push()
result = ary.tlist[0].get()
self.ctxs[0].pop()
return result
result = np.zeros((self.num_dev, 1), ary.dtype)
for i, (ctx, src_buf, strm) in enumerate(zip(
self.ctxs, ary.tlist, self.strms)):
ctx.push()
drv.memcpy_dtoh_async(result[i], src_buf.ptr, strm)
ctx.pop()
self.synchronize()
return result.sum()
def replica_to_fragment(self, reptsr, fragtsr):
        '''
        Scatters the replica into the fragments (this just discards data, so
        no p2p communication is necessary)
        '''
numrep = self.num_dev
fragsz = fragtsr.size
dsz = fragtsr.dtype.itemsize
assert reptsr.size == fragsz * numrep
strms = self.strms
starts = [i * fragsz for i in range(numrep)]
for dbuf, sbuf, ctx, offset, strm in zip(fragtsr.tlist, reptsr.tlist,
self.ctxs, starts, strms):
ctx.push()
drv.memcpy_dtod_async(dbuf.ptr, sbuf.ptr + offset * dsz,
fragsz * dsz, strm)
ctx.pop()
self.synchronize()
def fragment_to_replica(self, fragtsr, reptsr):
'''
Gathers the fragments from fragtsr into reptsr
'''
numrep = self.num_dev
fragsz = fragtsr.size
dsz = fragtsr.dtype.itemsize
assert reptsr.size == fragsz * numrep
assert fragtsr.is_contiguous
starts = [i * fragsz for i in range(numrep)]
for dbuf, dctx in zip(reptsr.tlist, self.ctxs):
for sbuf, sctx, soff, strm in zip(fragtsr.tlist, self.ctxs,
starts, self.strms):
myargs = [dbuf.ptr + soff * dsz, sbuf.ptr, fragsz * dsz]
if sctx == dctx:
cpfunc = drv.memcpy_dtod_async
else:
cpfunc = drv.memcpy_peer_async
myargs.extend([dctx, sctx])
myargs.append(strm)
sctx.push()
cpfunc(*myargs)
sctx.pop()
self.synchronize()
def share_activations(self, in_acts, out_acts, tmpbufs):
"""
        Not ideal that we have to do this, but for now it is enough to get
        things working.
        Placeholder function to deal with the fact that we lay our tensors
        out in column-major form. A fast in-place transpose could be added
        here for improved performance.
in_acts: fragment tensor to be joined
out_acts: replica tensor to receive
tmpbufs: contains temporary storage for the transposes
"""
assert in_acts.shape[0] == out_acts.shape[0]
assert in_acts.shape[1] == out_acts.shape[1] / self.num_dev
(rbufT, fbufT) = tmpbufs
assert fbufT.shape == in_acts.shape[::-1]
assert rbufT.shape == out_acts.shape[::-1]
self.transpose(in_acts, fbufT)
self.fragment_to_replica(fbufT, rbufT)
self.transpose(rbufT, out_acts)
def split_activations(self, in_acts, out_acts, tmpbufs):
"""
        Not ideal that we have to do this, but for now it is enough to get
        things working.
        Placeholder function to deal with the fact that we lay our tensors
        out in column-major form. A fast in-place transpose could be added
        here for improved performance.
in_acts: replica tensor to be split into fragments
out_acts: fragment tensor to receive
tmpbufs: contains temporary storage for the transposes
"""
assert in_acts.shape[0] == out_acts.shape[0]
assert in_acts.shape[1] == out_acts.shape[1] * self.num_dev
(rbufT, fbufT) = tmpbufs
assert fbufT.shape == out_acts.shape[::-1]
assert rbufT.shape == in_acts.shape[::-1]
self.transpose(in_acts, rbufT)
self.replica_to_fragment(rbufT, fbufT)
self.transpose(fbufT, out_acts)
def reduce(self, ary, ubuf):
"""
Does a summation reduction of the fragments in ary and rebroadcasts
them into ary using butterfly reduction. Uses ubuf as a temporary
storage buffer.
Requires that ubuf has same dtype as ary, and the size of ubuf is large
enough to store the broadcasted sub-fragments of ary.
"""
numrep = self.num_dev
totsz = ary.size
subsz = (totsz + numrep - 1) / numrep
dsz = ary.dtype.itemsize
assert ubuf.size == subsz * numrep
starts = [i * subsz for i in range(numrep)]
strmlist = self.redstrms
evtlist = self._redevents
# GATHER SUB-FRAGMENTS
for dbuf, dctx, doff, dstrm, evt in zip(ubuf.tlist, self.ctxs, starts,
strmlist, evtlist):
for sbuf, sctx, soff, strm in zip(ary.tlist, self.ctxs,
starts, strmlist):
myargs = [dbuf.ptr + soff * dsz,
sbuf.ptr + doff * dsz,
min(subsz, totsz - doff) * dsz]
if sctx == dctx:
cpfunc = drv.memcpy_dtod_async
else:
cpfunc = drv.memcpy_peer_async
myargs.extend([dctx, sctx])
myargs.append(strm)
cpfunc(*myargs)
evt.record(dstrm)
# REDUCTION of SUB-FRAGMENTS
for sbuf, dbuf, sctx, soff, strm, evt in zip(ary.tlist, ubuf.tlist,
self.ctxs, starts,
strmlist, evtlist):
self.ng.stream = strm
end = soff + min(subsz, totsz - soff)
sbuf = sbuf.reshape((totsz, 1))
ubtmp = dbuf.reshape((numrep, dbuf.size / numrep))
sctx.push()
strm.wait_for_event(evt)
self.ng.sum(ubtmp, axis=0, out=sbuf[soff:end])
sctx.pop()
# REBROADCAST
for dbuf, dctx in zip(ary.tlist, self.ctxs):
for sbuf, sctx, soff, strm in zip(ary.tlist, self.ctxs,
starts, strmlist):
if sctx != dctx:
drv.memcpy_peer_async(dbuf.ptr + soff * dsz,
sbuf.ptr + soff * dsz,
min(subsz, totsz - soff) * dsz,
dctx, sctx, strm)
return ary | neon/backends/mgpu.py | import logging
from neon.backends.backend import Block
from neon.backends.gpu import GPU
from nervanagpu import NervanaGPU, GPUTensor
import pycuda.driver as drv
import numpy as np
from functools import wraps
import atexit
logger = logging.getLogger(__name__)
def replicate(method):
def decorate(cls):
@wraps(cls)
def func(self, *args, **kwargs):
if self.ng.block is not None:
self.call_stack.append((method, args, kwargs))
return
else:
tsrlist = []
for idx, ctx in enumerate(getattr(self, 'ctxs')):
ctx.push()
self.ng.stream = self.strms[idx]
myargs = [a.tlist[idx] if isinstance(
a, MGPUTensor) else a for a in args]
mykwargs = {k: v.tlist[idx] if isinstance(
v, MGPUTensor) else v for k, v in kwargs.iteritems()}
tsrlist.append(
getattr(super(cls, self), method)(*myargs, **mykwargs))
self.ng.stream = None
ctx.pop()
return MGPUTensor(tsrlist) if tsrlist[0] is not None else None
setattr(cls, method, func)
return cls
return decorate
def passthru(method):
def decorate(cls):
@wraps(cls)
def func(self, *args, **kwargs):
tsrlist = []
for idx, (tsr, ctx) in enumerate(zip(getattr(self, '_tensorlist'),
getattr(self, 'ctxs'))):
ctx.push()
myargs = [a.tlist[idx] if isinstance(
a, MGPUTensor) else a for a in args]
mykwargs = {k: v.tlist[idx] if isinstance(
v, MGPUTensor) else v for k, v in kwargs.iteritems()}
tsrlist.append(getattr(tsr, method)(*myargs, **mykwargs))
ctx.pop()
if tsrlist[0] is not None:
return MGPUTensor(tsrlist, ptype=self.ptype)
setattr(cls, method, func)
return cls
return decorate
@passthru('_assign')
@passthru('fill')
@passthru('reshape')
@passthru('copy_from')
@passthru('__getitem__')
@passthru('__add__')
@passthru('__sub__')
@passthru('__mul__')
@passthru('__div__')
@passthru('__truediv__')
@passthru('__pow__')
@passthru('__radd__')
@passthru('__rsub__')
@passthru('__rmul__')
@passthru('__rdiv__')
@passthru('__ne__')
@passthru('__eq__')
class MGPUTensor(object):
ctxs = None
num_dev = 0
def __init__(self, tensorlist, ptype='fragment'):
self._tensorlist = tensorlist
self.ptype = ptype
@property
def shape(self):
return self._tensorlist[0].shape
@property
def dtype(self):
return self._tensorlist[0].dtype
@property
def size(self):
return self._tensorlist[0].size
@property
def is_contiguous(self):
return self._tensorlist[0].is_contiguous
@property
def tlist(self):
return self._tensorlist
@property
def ptr(self):
return self._tensorlist[0].gpudata.__int__()
def __setitem__(self, index, value):
if self.ctxs is None:
raise ValueError("Contexts not defined")
for idx, (tsr, ctx) in enumerate(zip(getattr(self, '_tensorlist'),
getattr(self, 'ctxs'))):
ctx.push()
if isinstance(value, MGPUTensor):
tsr.__setitem__(index, value._tensorlist[idx])
else:
tsr.__setitem__(index, value)
ctx.pop()
def asnumpyarray(self):
if self.ptype == 'replica':
self.ctxs[0].push()
rval = self._tensorlist[0].get()
self.ctxs[0].pop()
return rval
else:
rval = []
for subtensor, ctx in zip(self.tlist, self.ctxs):
ctx.push()
npv = subtensor.get()
rval.append(npv)
ctx.pop()
if self.ptype == 'vfragment':
return np.vstack(rval)
else:
return np.hstack(rval)
@property
def T(self): # noqa
"""
return a transposed view
"""
tsrlist = []
for tsr in self._tensorlist:
tsrlist.append(GPUTensor(backend=tsr.backend,
shape=tsr.shape[::-1], dtype=tsr.dtype,
allocator=tsr.allocator, base=tsr,
gpudata=tsr.gpudata,
strides=tsr.strides[::-1],
is_trans=(not tsr.is_trans),
name=tsr.name, rounding=tsr.rounding))
return self.__class__(tsrlist)
@replicate('fprop_conv')
@replicate('convolution')
@replicate('bprop_conv')
@replicate('update_conv')
@replicate('fprop_pool')
@replicate('bprop_pool')
@replicate('logistic')
@replicate('rectlin')
@replicate('rectlin_derivative')
@replicate('rectleaky')
@replicate('rectleaky_derivative')
@replicate('sum')
@replicate('mean')
@replicate('min')
@replicate('max')
@replicate('variance')
@replicate('fabs')
@replicate('sqrt')
@replicate('zeros')
@replicate('ones')
@replicate('empty')
@replicate('array')
@replicate('add')
@replicate('subtract')
@replicate('multiply')
@replicate('divide')
@replicate('greater')
@replicate('equal')
@replicate('not_equal')
@replicate('clip')
@replicate('log')
@replicate('tanh')
@replicate('argmax')
@replicate('softmax')
@replicate('softmax_gradient')
@replicate('make_binary_mask')
@replicate('gdm_compound')
@replicate('gdmwd_compound')
@replicate('ada_update')
@replicate('crossent')
@replicate('transpose')
@replicate('logistic_compound')
@replicate('fprop_bn_compound')
@replicate('bprop_bn_compound')
class MGPU(GPU):
default_dtype = np.float32
num_dev = 1
is_dist = True
def __init__(self, rng_seed, stochastic_round=False, device_id=0,
num_dev=2):
drv.init()
self.num_dev = num_dev
if device_id == 0:
self.dev_list = range(num_dev)
else:
self.dev_list = device_id
assert len(self.dev_list) == self.num_dev
assert self.num_dev <= drv.Device.count()
self.ctxs = []
self.devs = []
self._strms = []
self._redstrms = []
self._events = []
self._redevents = []
self.async = True
self._nostrms = [None for i in self.dev_list]
for i in self.dev_list:
self.devs.append(drv.Device(i))
for dev in self.devs:
self.ctxs.append(
dev.make_context(drv.ctx_flags.SCHED_BLOCKING_SYNC))
self._strms.append(drv.Stream())
self._redstrms.append(drv.Stream())
self._events.append(drv.Event())
self._redevents.append(drv.Event())
drv.Context.pop()
self.ctxs[0].push()
atexit.register(drv.Context.pop)
MGPUTensor.ctxs = self.ctxs
MGPUTensor.num_dev = num_dev
self.ng = NervanaGPU(stochastic_round=stochastic_round)
logger.info("Initialized %d device NervanaGPU, stochastic_round=%s",
num_dev, stochastic_round)
self.ng.block = None
self.rng_seed = rng_seed
self.rng_init()
# Setup the pairwise contexts
# TODO clean up this code to avoid indexing
for dev1, ctx1 in zip(self.devs, self.ctxs):
ctx1.push()
for dev2, ctx2 in zip(self.devs, self.ctxs):
if dev1 == dev2:
continue
if dev1.can_access_peer(dev2):
ctx1.enable_peer_access(ctx2)
else:
print('Cannot enable peer access between '
'{:d} and {:d}'.format(dev1, dev2))
ctx1.pop()
def make_events(self):
evtlist = []
for ctx in self.ctxs:
ctx.push()
evtlist.append(drv.Event())
ctx.pop()
return evtlist
# These definitions are for performing grouped context commands
# This is experimental and should remove _stack for actual usage
def begin_stack(self, block, identifier):
if block == Block.update:
self.ng.block = Block.update
self.call_stack = []
else:
pass
def end_stack(self, block, identifier):
if block == Block.update:
self.ng.block = None
for idx, ctx in enumerate(self.ctxs):
ctx.push()
self.ng.stream = self.strms[idx]
for method, args, kwargs in self.call_stack:
myargs = [a._tensorlist[idx] if isinstance(
a, MGPUTensor) else a for a in args]
mykwargs = {k: v._tensorlist[idx] if isinstance(
v, MGPUTensor) else v for k, v in kwargs.iteritems()}
getattr(super(MGPU, self), method)(*myargs, **mykwargs)
self.ng.stream = None
ctx.pop()
self.call_stack = None
else:
pass
@property
def strms(self):
return self._strms if self.async else self._nostrms
@property
def redstrms(self):
return self._redstrms if self.async else self._nostrms
def uniform(self, low=0.0, high=1.0, size=1, dtype=default_dtype,
name=None, persist_values=True, ptype='replica'):
"""
generate numpy random number and convert to a GPUTensor.
If called with dtype=None it will probably explode
"""
assert len(size) == 2
result = self.empty(size, dtype=dtype, persist_values=persist_values)
result.ptype = ptype
beshape = size if ptype == 'replica' else (self.num_dev * size[0],
size[1])
ary = np.random.uniform(low, high, beshape).astype(dtype)
self.set(result, ary)
return result
def normal(self, loc=0.0, scale=1.0, size=1, dtype=default_dtype,
name=None, persist_values=True, ptype='replica'):
"""
Gaussian/Normal random number sample generation
"""
assert len(size) == 2
result = self.empty(size, dtype=dtype, persist_values=persist_values)
result.ptype = ptype
beshape = size if ptype == 'replica' else (self.num_dev * size[0],
size[1])
ary = np.random.normal(loc, scale, beshape).astype(dtype)
self.set(result, ary)
return result
def synchronize(self):
if not self.async:
return
for s in self.strms:
s.synchronize()
def redsynchronize(self):
if not self.async:
return
for s in self.redstrms:
s.synchronize()
def allocate_fragment(self, shape, dtype=default_dtype,
persist_values=True):
# TODO: set ptype to be fragment in this case ??
return self.empty((shape[0], shape[1] / self.num_dev), dtype,
persist_values=persist_values)
def zeros_like(self, ary, dtype=default_dtype, persist_values=True,
name=None):
result = self.zeros(ary.shape, dtype=dtype,
persist_values=persist_values)
result.ptype = ary.ptype
return result
def empty_like(self, ary, dtype=default_dtype, persist_values=True,
name=None):
result = self.empty(ary.shape, dtype=dtype,
persist_values=persist_values, name=name)
result.ptype = ary.ptype
return result
def set(self, tensor, data):
assert isinstance(tensor, MGPUTensor)
if tensor.ptype == 'replica':
for dest, strm, ctx in zip(tensor.tlist, self.strms, self.ctxs):
ctx.push()
drv.memcpy_htod_async(dest.ptr, data, strm)
ctx.pop()
# tensor.copy_from(data)
else:
self.scatter(data, tensor)
def scatter(self, hbuf, dbuf):
'''
scatters the array data in hbuf to the mgpu tensor
assumes that dbuf is a M x N and hbuf is M x (Nxk) where k is the
number of replicas
also assumes that dtype of hbuf and dbuf are the same
'''
assert hbuf.size == dbuf.size * dbuf.num_dev
assert isinstance(dbuf, MGPUTensor)
assert hbuf.dtype == dbuf.dtype
ndata = dbuf.size
starts = [i * ndata for i in range(self.num_dev)]
for dest, strm, ctx, doff in zip(dbuf.tlist, self.strms, self.ctxs,
starts):
src = hbuf.reshape((hbuf.size))[doff:(doff + ndata)]
ctx.push()
drv.memcpy_htod_async(dest.ptr, src, strm)
ctx.pop()
self.synchronize()
def fprop_fc(self, out, inputs, weights, layer=None):
"""
In this case, the weights are shards, the acts are replicas
ubuf should be of size nout/num_dev x mbsz
"""
ubuf = layer.mempool[0]
assert ubuf.shape == (weights.shape[0], inputs.shape[1])
if layer.use_biases:
biases = layer.biases.tlist
else:
biases = [None for i in range(self.num_dev)]
for dbuf, ibuf, wt, bs, strm, ctx in zip(ubuf.tlist, inputs.tlist,
weights.tlist, biases,
self.strms, self.ctxs):
ctx.push()
self.ng.stream = strm
self.ng.dot(wt, ibuf, dbuf)
if layer.use_biases:
self.ng.add(dbuf, bs, out=dbuf)
ctx.pop()
# Note, should be safe not to sync because each fragment is computed
# on the same stream that originates the copy
# self.synchronize()
self.fragment_to_replica(ubuf, out)
def bprop_fc(self, out, weights, deltas, layer=None):
"""
Backward propagate the error through a fully connected network layer.
Arguments:
out (GPUTensor): Where to store the backward propagated errors.
weights (GPUTensor): The weight coefficient values for this layer.
deltas (GPUTensor): The error values for this layer
layer (Layer): The layer object.
"""
ubuf = layer.mempool[1]
wtsz = weights.shape[0]
starts = [i * wtsz for i in range(self.num_dev)]
assert out.shape == (weights.shape[1], deltas.shape[1])
assert ubuf.shape == out.shape
for dbuf, ibuf, wt, strm, ctx, off in zip(out.tlist, deltas.tlist,
weights.tlist, self.strms,
self.ctxs, starts):
ctx.push()
self.ng.stream = strm
self.ng.dot(wt.T, ibuf[off:(off + wtsz)], dbuf)
ctx.pop()
# Note, should be safe not to sync because each fragment is computed
# on the same stream that originates the copy
self.synchronize()
self.reduce(out, ubuf)
def update_fc(self, out, inputs, deltas, layer=None):
wtsz = out.shape[0]
starts = [i * wtsz for i in range(self.num_dev)]
for obuf, dbuf, ibuf, strm, ctx, off in zip(out.tlist, deltas.tlist,
inputs.tlist, self.strms,
self.ctxs, starts):
ctx.push()
self.ng.stream = strm
self.ng.dot(dbuf[off:(off + wtsz)], ibuf.T, obuf)
ctx.pop()
# self.synchronize()
def update_fc_bias(self, err, out):
"""
Compute the updated bias gradient for a fully connected network layer.
Arguments:
out (GPUTensor): Where to store the updated gradient value.
err (GPUTensor): backpropagated error
"""
wtsz = out.shape[0]
starts = [i * wtsz for i in range(self.num_dev)]
for ebuf, obuf, strm, ctx, off in zip(err.tlist, out.tlist, self.strms,
self.ctxs, starts):
ctx.push()
self.ng.stream = strm
self.ng.sum(ebuf[off:(off + wtsz)], axis=1, out=obuf)
ctx.pop()
def add_fc_bias(self, inputs, bias):
"""
This is a no-op since we absorb the bias add into the fprop_fc call
"""
pass
def reduce_tensor(self, ary, async=True):
'''
This is the case for the scalar tensor
'''
assert ary.size == 1
if ary.ptype == 'replica':
self.ctxs[0].push()
result = ary.tlist[0].get()
self.ctxs[0].pop()
return result
result = np.zeros((self.num_dev, 1), ary.dtype)
for i, (ctx, src_buf, strm) in enumerate(zip(
self.ctxs, ary.tlist, self.strms)):
ctx.push()
drv.memcpy_dtoh_async(result[i], src_buf.ptr, strm)
ctx.pop()
self.synchronize()
return result.sum()
def replica_to_fragment(self, reptsr, fragtsr):
'''
Scatters the replica into the fragments (this just discards, so no p2p
communication necessary
'''
numrep = self.num_dev
fragsz = fragtsr.size
dsz = fragtsr.dtype.itemsize
assert reptsr.size == fragsz * numrep
strms = self.strms
starts = [i * fragsz for i in range(numrep)]
for dbuf, sbuf, ctx, offset, strm in zip(fragtsr.tlist, reptsr.tlist,
self.ctxs, starts, strms):
ctx.push()
drv.memcpy_dtod_async(dbuf.ptr, sbuf.ptr + offset * dsz,
fragsz * dsz, strm)
ctx.pop()
self.synchronize()
def fragment_to_replica(self, fragtsr, reptsr):
'''
Gathers the fragments from fragtsr into reptsr
'''
numrep = self.num_dev
fragsz = fragtsr.size
dsz = fragtsr.dtype.itemsize
assert reptsr.size == fragsz * numrep
assert fragtsr.is_contiguous
starts = [i * fragsz for i in range(numrep)]
for dbuf, dctx in zip(reptsr.tlist, self.ctxs):
for sbuf, sctx, soff, strm in zip(fragtsr.tlist, self.ctxs,
starts, self.strms):
myargs = [dbuf.ptr + soff * dsz, sbuf.ptr, fragsz * dsz]
if sctx == dctx:
cpfunc = drv.memcpy_dtod_async
else:
cpfunc = drv.memcpy_peer_async
myargs.extend([dctx, sctx])
myargs.append(strm)
sctx.push()
cpfunc(*myargs)
sctx.pop()
self.synchronize()
def share_activations(self, in_acts, out_acts, tmpbufs):
"""
Not ideal we have to do this, but for now this is enough to get things
working
Placeholder function to deal with the facts that we lay our tensors
out in column-major form. Fast inplace transpose can be placed in
here for improved performance
in_acts: fragment tensor to be joined
out_acts: replica tensor to receive
tmpbufs: contains temporary storage for the transposes
"""
assert in_acts.shape[0] == out_acts.shape[0]
assert in_acts.shape[1] == out_acts.shape[1] / self.num_dev
(rbufT, fbufT) = tmpbufs
assert fbufT.shape == in_acts.shape[::-1]
assert rbufT.shape == out_acts.shape[::-1]
self.transpose(in_acts, fbufT)
self.fragment_to_replica(fbufT, rbufT)
self.transpose(rbufT, out_acts)
def split_activations(self, in_acts, out_acts, tmpbufs):
"""
        Not ideal that we have to do this, but for now it is enough to get
        things working.
        Placeholder function to deal with the fact that we lay our tensors
        out in column-major form; a fast in-place transpose could be placed
        here for improved performance.
        in_acts: replica tensor to be split into fragments
        out_acts: fragment tensor to receive the result
        tmpbufs: temporary storage for the transposes
"""
assert in_acts.shape[0] == out_acts.shape[0]
assert in_acts.shape[1] == out_acts.shape[1] * self.num_dev
(rbufT, fbufT) = tmpbufs
assert fbufT.shape == out_acts.shape[::-1]
assert rbufT.shape == in_acts.shape[::-1]
self.transpose(in_acts, rbufT)
self.replica_to_fragment(rbufT, fbufT)
self.transpose(fbufT, out_acts)
def reduce(self, ary, ubuf):
"""
Does a summation reduction of the fragments in ary and rebroadcasts
them into ary using butterfly reduction. Uses ubuf as a temporary
storage buffer.
Requires that ubuf has same dtype as ary, and the size of ubuf is large
enough to store the broadcasted sub-fragments of ary.
"""
numrep = self.num_dev
totsz = ary.size
        subsz = (totsz + numrep - 1) // numrep
dsz = ary.dtype.itemsize
assert ubuf.size == subsz * numrep
starts = [i * subsz for i in range(numrep)]
strmlist = self.redstrms
evtlist = self._redevents
# GATHER SUB-FRAGMENTS
for dbuf, dctx, doff, dstrm, evt in zip(ubuf.tlist, self.ctxs, starts,
strmlist, evtlist):
for sbuf, sctx, soff, strm in zip(ary.tlist, self.ctxs,
starts, strmlist):
myargs = [dbuf.ptr + soff * dsz,
sbuf.ptr + doff * dsz,
min(subsz, totsz - doff) * dsz]
if sctx == dctx:
cpfunc = drv.memcpy_dtod_async
else:
cpfunc = drv.memcpy_peer_async
myargs.extend([dctx, sctx])
myargs.append(strm)
cpfunc(*myargs)
evt.record(dstrm)
# REDUCTION of SUB-FRAGMENTS
for sbuf, dbuf, sctx, soff, strm, evt in zip(ary.tlist, ubuf.tlist,
self.ctxs, starts,
strmlist, evtlist):
self.ng.stream = strm
end = soff + min(subsz, totsz - soff)
sbuf = sbuf.reshape((totsz, 1))
            ubtmp = dbuf.reshape((numrep, dbuf.size // numrep))
sctx.push()
strm.wait_for_event(evt)
self.ng.sum(ubtmp, axis=0, out=sbuf[soff:end])
sctx.pop()
# REBROADCAST
for dbuf, dctx in zip(ary.tlist, self.ctxs):
for sbuf, sctx, soff, strm in zip(ary.tlist, self.ctxs,
starts, strmlist):
if sctx != dctx:
drv.memcpy_peer_async(dbuf.ptr + soff * dsz,
sbuf.ptr + soff * dsz,
min(subsz, totsz - soff) * dsz,
dctx, sctx, strm)
        return ary
| 0.584271 | 0.072243 |
import bs4
try:
    import Product_Finder.backend.sortResults as sortResults
except ImportError:
    import sortResults
import datetime as date
from urllib.request import urlopen as urlReq
from bs4 import BeautifulSoup as soup
neweggDBPK = 1
def searchInNewegg(searchString, blockedWord, searchPageDepth, sortPreference, currency):
searchString = searchString.replace(' ','+')
results=[]
currentPage = 1
datetime = date.datetime.now()
while currentPage <= searchPageDepth :
if currentPage != 0 :
if currentPage <= (searchPageDepth + 1) :
urlSite = "https://www.newegg.com/p/pl?d=" + searchString + "&Page=" + str(currentPage)
webSite = urlReq(urlSite)
html = webSite.read()
webSite.close()
page_soup = soup(html, 'html.parser')
itemsWholeGrid = page_soup.find('div',{'class':'items-view is-grid'})
try:
itemsWhole = itemsWholeGrid.findAll('div',{'class':'item-container'})
except AttributeError as err:
print(err)
break
for item in itemsWhole:
def itemAnalysis():
#print('--------------------------------')
text = item.find('div',{'class':'item-info'})
name=str(text.find('a',{'class':'item-title'}).text)
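                        # NB: fragile scraping - the next line slices fixed offsets
                        # out of the raw <li class="price-current"> HTML and will
                        # break if Newegg changes its markup.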
price = str(text.find('li',{'class':'price-current'}))[78:85].strip('</strong>').replace(',','')
                        try:
                            discount = str(text.find('span',{'class':'price-save-percent'}).text).strip('%')
                        except AttributeError:
                            # no discount element found
                            discount = 0
if discount == 'None' :
discount = 0
if discount=='':
discount=0
itemNumber = str(len(results)+1)
                        link = str(text.find('a',{'class':'item-title'})['href']).partition('?')[0].replace('https://', '', 1)
                        try:
                            img = item.find('img',{})['data-src']
                        except (KeyError, TypeError):
                            img = item.find('img',{})['src']
results.append((str(itemNumber), str(price), name, link, str(discount), str(datetime) ,str(neweggDBPK),img))
#print("item #"+ itemNumber +": "+ name +" $"+ price + ' OFF: '+ discount )
bWordFound = 0
for bWord in blockedWord:
if bWord in str(item):
bWordFound+=1
if bWordFound == 0 :
itemAnalysis()
currentPage=currentPage+1
print('results in NewEgg :' + str(len(results)))
    if sortPreference == 'Increasing':
        return sortResults.sortIncreasing(results)
    if sortPreference == 'Decreasing':
        return sortResults.sortDecreasing(results)
    return results  # fallback: unsorted results when no sort preference matches
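# Usage sketch (hypothetical arguments; live network access required):
# results = searchInNewegg("rtx 3060", blockedWord=["refurbished"],
#                          searchPageDepth=2, sortPreference="Increasing",
#                          currency="USD")
# Each result tuple is (itemNumber, price, name, link, discount, datetime,
# neweggDBPK, img).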
| backend/newegg_scrapper.py | 0.050776 | 0.074804 |
import ast
import nltk
import pprint
import random
class Markov(object):
def __init__(self, order=2, dictFile="", maxWordInSentence=20):
self.table = {}
self.inputLineCount = 0
self.inputWordCount = 0
self.setOrder( order )
self.setMaxWordInSentence(maxWordInSentence)
if dictFile:
self.loadDictionary(dictFile)
def setOrder(self, order=2):
self.order = order
    def loadDictionary(self, dictFile):
        with open(dictFile, 'r') as inf:
            # literal_eval safely parses the pprint-ed dict (tuples, lists, strings)
            self.table = ast.literal_eval(inf.read())
def readFile(self, filename, fileEncoding="utf-8"):
with open(filename, "r", encoding=fileEncoding) as file:
strLine = " ".join(file)
self.processSection(strLine)
def processSection(self,line ):
sent_text = nltk.sent_tokenize(line) #gives us a list of sentences
for sentence in sent_text:
self.inputLineCount = self.inputLineCount + 1
tokens = sentence.split()
            keyList = []
            # Add a special key holding just the sentence-beginning words
            self.table.setdefault('#BEGIN#', []).append(tokens[0:self.order])
#loops through each word, and if we have enough to add dictionary item, then add
for item in tokens:
if len(keyList) < self.order :
keyList.append(item)
continue
#If we already have the item, then add it, otherwise add to empty list
self.table.setdefault( tuple(keyList), []).append(item)
#Remove the first word and push last word on to it
keyList.pop(0)
keyList.append(item)
self.inputWordCount = self.inputWordCount + 1
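        # e.g. after processing "the cat sat" with order=2, the table holds:
        #   {'#BEGIN#': [['the', 'cat']], ('the', 'cat'): ['sat']}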
def setMaxWordInSentence(self, maxWordInSentence):
self.maxWordInSentence = maxWordInSentence
def genText(self):
key = list(random.choice( self.table['#BEGIN#'] ))
genStr = " ".join( key )
for _ in range( self.maxWordInSentence ):
            newKey = self.table.get(tuple(key), "")  # .get avoids inserting empty entries while generating
            if newKey == "":
break
newVal = random.choice( newKey )
genStr = genStr + " " + newVal
key.pop(0)
key.append(newVal)
return genStr
def getLineCount(self):
return self.inputLineCount
def getWordCount(self):
return self.inputWordCount
    def outputDict(self, filename):
        with open(filename, 'w') as markovDictFile:
            pprint.pprint(self.table, markovDictFile)
| MarkovChain.py | 0.082248 | 0.105671 |
import hashlib
import requests
from xml.etree import ElementTree
from aws_requests_auth.aws_auth import AWSRequestsAuth
def create_bucket(host, bucketName, accessKey, secretKey):
"""
Create a bucket.
@param host: S3/Cleversafe host
@param bucketName: Bucket name
@param accessKey: Access Key ID
@param secretKey: Secret Access Key
"""
auth = AWSRequestsAuth(aws_access_key=accessKey,
aws_secret_access_key=secretKey,
aws_host=host,
aws_region='',
aws_service='s3'
)
url = 'http://{host}/{bucket}'.format(host=host, bucket=bucketName)
response = requests.request('PUT', url, auth=auth)
return response.status_code == 200
class S3Bucket(object):
"""
Simple S3/Cleversafe library. It supports: multipart read/write, getting size and removing.
Uses path-style access to buckets (example.com/bucketName/key), not the virtual hosted-style (bucketName.example.com/key)
"""
def __init__(self, host, bucketName, accessKey, secretKey):
"""
Init the AWS Authenticating module.
@param host: S3/Cleversafe host
@param bucketName: Bucket name
@param accessKey: Access Key ID
@param secretKey: Secret Access Key
"""
self.host = host
self.bucketName = bucketName
self.auth = AWSRequestsAuth(aws_access_key=accessKey,
aws_secret_access_key=secretKey,
aws_host=self.host,
aws_region='',
aws_service='s3'
)
self.s3NS = '{http://s3.amazonaws.com/doc/2006-03-01/}'
@property
def _baseUrl(self):
return 'http://{host}/{bucket}'.format(host=self.host, bucket=self.bucketName)
def _request(self, method, key=None, headers=None, data=None):
"""Helper method. Sets url and headers"""
url = self._baseUrl
if key:
url += '/' + key
if not headers:
headers = dict()
if data:
headers.update(self._get_content_hash_header(data))
response = requests.request(method, url, auth=self.auth, headers=headers, data=data)
response.raise_for_status()
return response
    def _create_multipart_upload_body(self, parts):
        """Helper method that builds the XML body listing part numbers and their ETags"""
rootElement = ElementTree.Element('CompleteMultipartUpload')
for part in parts:
number, etag = part
partElement = ElementTree.SubElement(rootElement, 'Part')
numberElement = ElementTree.SubElement(partElement, 'PartNumber')
numberElement.text = str(number)
etagElement = ElementTree.SubElement(partElement, 'ETag')
etagElement.text = etag
return ElementTree.tostring(rootElement)
    def _get_upload_id(self, response):
        """Helper method that parses the XML response for the UploadId value"""
        xmlRoot = ElementTree.fromstring(response.content)
elementName = '{ns}UploadId'.format(ns=self.s3NS)
return xmlRoot.find(elementName).text
    def _get_content_hash_header(self, content):
        """Helper method: calculates the hash of the request body and returns it in a header dict"""
header = {
'x-amz-content-sha256': hashlib.sha256(content).hexdigest()
}
return header
def exists(self, key):
"""
Check if file exists.
@param key: file name
@return: Boolean
"""
response = self._request('HEAD', key)
return response.status_code == 200
def get_size(self, key):
"""
Get file size.
@param key: file name
@return: file size as int
"""
response = self._request('HEAD', key)
return int(response.headers.get('content-length'))
def get_etag(self, key):
"""
Get ETag of file.
@param key: file name
@return: ETag
"""
response = self._request('HEAD', key)
return response.headers.get('etag')
def remove(self, key):
"""
Delete file.
@param key: file name
"""
response = self._request('DELETE', key)
return response.status_code == 204
def read(self, key, offset, size):
"""
Read a part of a file.
@param key: file name
@param offset: the byte to start read
@param size: how many bytes will be read
@return: read content
"""
end = offset + size - 1
headers = {
'Range': 'bytes={begin!s}-{end!s}'.format(begin=offset, end=end)
}
response = self._request('GET', key, headers)
return response.content
def init_multipart_upload(self, key):
"""
        Initialize a multipart upload. Returns the upload ID, an identifier of the current upload session.
        You have to call complete_multipart_upload after writing all file parts to complete the upload, or
        abort_multipart_upload to abort this multipart upload session.
@param key: file name
@return: upload Id
"""
url = '{key}?uploads'.format(key=key)
response = self._request('POST', url)
return self._get_upload_id(response)
def write(self, key, partNo, uploadId, content):
"""
        Upload one part of a file using a multipart upload, which must have been opened with init_multipart_upload.
        You have to call complete_multipart_upload after writing all file parts to complete the upload, or
        abort_multipart_upload to abort this multipart upload session.
        @param key: file name
        @param partNo: part number, an int in the range 1-10000
@param uploadId: upload ID
@param content: content to upload
@return: ETag of uploaded content
"""
url = '{key!s}?uploadId={uploadId!s}&partNumber={partNo!s}'.format(key=key, uploadId=uploadId, partNo=partNo)
response = self._request('PUT', url, data=content)
return response.headers.get('etag')
def complete_multipart_upload(self, key, uploadId, parts):
"""
        Close the upload session. It needs a list of all part numbers and their ETags to complete the file upload.
@param key: file name
@param uploadId: upload ID
@param parts: list of tuples containing part number and ETag of this part
"""
body = self._create_multipart_upload_body(parts)
url = '{key}?uploadId={uploadId}'.format(key=key, uploadId=uploadId)
headers = {
'Content-Type': 'text/xml'
}
response = self._request('POST', url, data=body, headers=headers)
return response.status_code == 200
def abort_multipart_upload(self, key, uploadId):
"""
Abort multipart upload session.
@param key: file name
@param uploadId: upload Id
"""
url = '{key}?uploadId={uploadId}'.format(key=key, uploadId=uploadId)
response = self._request('DELETE', url)
return response.status_code == 204
def delete_bucket(self):
"""
Delete bucket.
"""
response = self._request('DELETE')
        return response.status_code == 204
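# Usage sketch (hypothetical host and credentials; shows the multipart flow):
# bucket = S3Bucket("s3.example.com", "my-bucket", "ACCESS_KEY", "SECRET_KEY")
# uploadId = bucket.init_multipart_upload("big.bin")
# etag1 = bucket.write("big.bin", 1, uploadId, b"x" * (5 * 1024 * 1024))
# bucket.complete_multipart_upload("big.bin", uploadId, [(1, etag1)])
# assert bucket.exists("big.bin")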
| bobos3/bobos3.py | 0.747432 | 0.08389 |
from __future__ import absolute_import
import datetime
import re
import pytz
from pytz import UTC
from .util import if_none
#-------------------------------------------------------------------------------
def ensure_date(date):
"""
Attempts to convert an object to a date.
Accepts the following:
- A `date` instance.
- A `datetime` instance, as long as the time zone is set.
- A YYYYMMDD `int`.
- A YYYYMMDD `str`.
- A YYYY-MM-DD `str`.
- `"local-today"`, for today in the host's local time zone.
- `"utc-today"`, for today in UTC.
@rtype
`datetime.date`
"""
if isinstance(date, datetime.datetime):
if date.tzinfo is None:
# No matter what the datetime module may say, a datetime without a
# timezone is not a date, nor does it specify one.
raise TypeError(
"can't convert datetime to date without a time zone")
else:
return date.date()
if isinstance(date, datetime.date):
return date
if isinstance(date, int) and 18000000 <= date <= 30000000:
# Assume it's YYYYMMDD-encoded.
return datetime.date(date // 10000, (date % 10000) // 100, date % 100)
if date == "local-today":
return datetime.date.today()
if date == "utc-today":
return datetime.datetime.utcnow().date()
def from_ymd(y, m, d):
try:
return datetime.date(y, m, d)
except ValueError:
raise TypeError("not a date: {!r}".format(date))
if isinstance(date, str):
match = re.match(r"([12]\d\d\d)([01]\d)([0-3]\d)$", date)
if match is None:
match = re.match(r"([12]\d\d\d)-([01]?\d)-([0-3]?\d)$", date)
if match is not None:
# It's "YYYYMMDD"-encoded.
return from_ymd(*[ int(g) for g in match.groups() ])
raise TypeError("not a date: {!r}".format(date))
def ensure_time(time):
"""
Attempts to convert an object to a time (of day).
@rtype
`datetime.time`
"""
    if isinstance(time, datetime.datetime):
        return time.time()
if isinstance(time, datetime.time):
return time
if time == "local-now":
return datetime.datetime.now().time()
if time == "utc-now":
return datetime.datetime.utcnow().time()
def from_parts(h, m, s=0):
try:
return datetime.time(h, m, s)
except ValueError:
raise TypeError("not a time: {!r}".format(time))
if isinstance(time, str):
        match = re.match(r"(\d?\d):(\d\d):(\d\d)$", time)
        if match is None:
            match = re.match(r"(\d?\d):(\d\d)$", time)
if match is not None:
return from_parts(*[ int(g) for g in match.groups() ])
raise TypeError("not a time: {!r}".format(time))
_DATETIME_REGEXES = [
re.compile(r)
for r in (
r"(?P<ye>[12]\d\d\d)-(?P<mo>[01]?\d)-(?P<da>[0-3]?\d) (?P<ho>[0-2]?\d):(?P<mi>[0-5]\d):(?P<se>\d\d)?$",
r"(?P<ye>[12]\d\d\d)-(?P<mo>[01]?\d)-(?P<da>[0-3]\d)T(?P<ho>[0-2]?\d):(?P<mi>[0-5]\d):(?P<se>\d\d)?Z$",
)
]
def ensure_datetime(dt):
"""
Attempts to convert an object to a UTC datetime.
Accepts the following:
- A `datetime` instance.
- A `numpy.datetime64` instance.
- `"now"`, for the current datetime.
- A "YYYY-MM-DD HH:MM:SS" string, assumed to be UTC.
- A "YYYY-MM-DDTHH:MM:SSZ" ISO 8601 string.
"""
if isinstance(dt, datetime.datetime):
return dt
# Check for numpy.datetime64. We just try to convert it like this to avoid
# importing numpy, if it hasn't been yet.
    try:
        item = dt.item()
    except AttributeError:
        pass
else:
if isinstance(item, datetime.datetime):
return item.replace(tzinfo=UTC)
if dt == "now":
return datetime.datetime.utcnow().replace(tzinfo=UTC)
def from_parts(ye, mo, da, ho=0, mi=0, se=0):
try:
return datetime.datetime(ye, mo, da, ho, mi, se, tzinfo=UTC)
except ValueError:
raise TypeError("not a datetime: {!r}".format(dt))
if isinstance(dt, str):
for regex in _DATETIME_REGEXES:
match = regex.match(dt)
if match is not None:
ye = int(match.group("ye"))
mo = int(match.group("mo"))
da = int(match.group("da"))
ho = int(if_none(match.group("ho"), 0))
mi = int(if_none(match.group("mi"), 0))
se = int(if_none(match.group("se"), 0))
return from_parts(ye, mo, da, ho, mi, se)
raise TypeError("not a datetime: {!r}".format(dt))
def ensure_timedelta(delta):
"""
Attempts to convert an object to a time delta.
@rtype
`datetime.timedelta`
"""
if isinstance(delta, datetime.timedelta):
return delta
if isinstance(delta, str):
# FIXME
match = re.match(r"(\d+) *(d|h|m|s)", delta)
if match is not None:
num, unit = match.groups()
if unit == "d":
return datetime.timedelta(int(num), 0)
else:
secs = int(num) * {"h": 3600, "m": 60, "s": 1}.get(unit)
return datetime.timedelta(0, secs)
raise TypeError("not a timedelta: {!r}".format(delta))
def ensure_tz(tz):
"""
Attempts to convert an object to a time zone.
@rtype
`datetime.tzinfo`
"""
if isinstance(tz, datetime.tzinfo):
return tz
if tz is None:
return UTC
if isinstance(tz, str):
try:
return pytz.timezone(tz)
except pytz.exceptions.UnknownTimeZoneError:
raise ValueError("not a time zone: {}".format(tz))
raise TypeError("not a time zone: {!r}".format(tz)) | ngrid/datetime.py |
| 0.722037 | 0.340184 |
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import sip
from application.view.target_button import TargetButton
from application.view.chess_button import ChessButton
INTERVAL = 50
LONG_RADIUS = INTERVAL * 4
SHORT_RADIUS = INTERVAL * 2
CHESS_SIZE = 30
class GameView(QWidget):
def __init__(self, *__args):
super().__init__(*__args)
self.click_callback = None
self.target_click_callback = None
self.chess_move_callback = None
self.game_begin_callback = None
self.change_mode_callback = None
self.gen_callback = None
self.targets = []
self.chess_list = []
self._player = -1
self._is_ai_first_go = False
self._init_view()
self._init_timer()
def _init_view(self):
self._setup_buttons()
self.human_radio = QRadioButton("人人", self)
self.human_radio.setGeometry(INTERVAL * 10, 0, 100, 25)
self.ai_radio = QRadioButton("人机", self)
self.ai_radio.setGeometry(INTERVAL * 10 + 100, 0, 100, 25)
mode_button_group = QButtonGroup(self)
mode_button_group.addButton(self.human_radio, 1)
mode_button_group.addButton(self.ai_radio, 2)
mode_button_group.buttonClicked.connect(self._select_mode_radio)
self.first_human_radio = QRadioButton("人先手", self)
self.first_human_radio.setGeometry(INTERVAL * 10, 35, 100, 25)
self.first_human_radio.hide()
self.first_ai_radio = QRadioButton("机先手", self)
self.first_ai_radio.setGeometry(INTERVAL * 10 + 100, 35, 100, 25)
self.first_ai_radio.hide()
first_button_group = QButtonGroup(self)
first_button_group.addButton(self.first_human_radio, 1)
first_button_group.addButton(self.first_ai_radio, 2)
first_button_group.buttonClicked.connect(self._select_first_radio)
self.begin_button = QPushButton(self)
self.begin_button.setStyleSheet("QPushButton{border-radius: 10; background-color: white; color: black;}"
"QPushButton:hover{background-color: lightgray}")
self.begin_button.setText("开始")
self.begin_button.setGeometry(INTERVAL * 10, 70, 200, 25)
self.begin_button.clicked.connect(self._click_begin_button)
self.gen_button = QPushButton(self)
self.gen_button.setStyleSheet("QPushButton{border-radius: 10; background-color: white; color: black;}"
"QPushButton:hover{background-color: lightgray}")
self.gen_button.setText("生成棋谱")
self.gen_button.setGeometry(INTERVAL * 10, 100, 200, 25)
self.gen_button.clicked.connect(self._click_gen_button)
self.red_time_label = QLabel(self)
self.red_time_label.setText("00:00")
self.red_time_label.setStyleSheet("color: red")
self.red_time_label.setGeometry(INTERVAL * 10, 130, 100, 25)
self.blue_time_label = QLabel(self)
self.blue_time_label.setText("00:00")
self.blue_time_label.setStyleSheet("color: blue")
self.blue_time_label.setGeometry(INTERVAL * 10 + 100, 130, 100, 25)
self.list_widget = QListWidget(self)
self.list_widget.setGeometry(INTERVAL * 10, 160, 200, 300)
def _init_timer(self):
self._red_time = 0
self._blue_time = 0
self._timer = QTimer(self)
self._timer.setInterval(1000)
self._timer.timeout.connect(self._timer_operate)
def show_game_end(self, player):
if player == -1:
message = "红方获胜"
else:
message = "蓝方获胜"
print(message)
def show_targets(self, frames):
self.remove_all_targets()
for frame in frames:
btn = TargetButton(self)
btn.setup_frame(frame)
btn.clicked.connect(self._click_target_btn)
btn.show()
self.targets.append(btn)
def remove_all_targets(self):
for btn in self.targets:
btn.hide()
sip.delete(btn)
self.targets.clear()
def remove_chess(self, tag):
for btn in self.chess_list:
if btn.tag == tag:
self.chess_list.remove(btn)
btn.hide()
sip.delete(btn)
break
    def move_chess(self, chess_tag, to_frame):
        self._player = -self._player
        for chess in self.chess_list:
            if chess_tag == chess.tag:
                # Qt geometry APIs expect ints, so use floor division
                chess.move(to_frame[1] - CHESS_SIZE // 2, to_frame[0] - CHESS_SIZE // 2)
                # After the piece moves, call back so the board data is updated
                self.chess_move_callback(to_frame)
                return
def add_move_info(self, tag: int, f: tuple, t: tuple):
text = "tag {tag}: ({fx}, {fy}) -> ({tx}, {ty})".format(tag=tag,
fx=f[0],
fy=f[1],
tx=t[0],
ty=t[1])
item = QListWidgetItem(text)
self.list_widget.addItem(item)
@pyqtSlot()
def _click_gen_button(self):
self.gen_callback()
@pyqtSlot()
def _click_btn(self):
self.click_callback(self.sender().tag)
@pyqtSlot()
def _click_target_btn(self):
self.target_click_callback(self.sender().x, self.sender().y)
@pyqtSlot()
def _select_mode_radio(self):
if self.sender().checkedId() == 1:
self.first_human_radio.hide()
self.first_ai_radio.hide()
self.change_mode_callback(1)
else:
self.first_human_radio.show()
self.first_ai_radio.show()
self.change_mode_callback(2)
@pyqtSlot()
def _click_begin_button(self):
self._player = 1
self._timer.start()
self.begin_button.setEnabled(False)
self.ai_radio.setEnabled(False)
self.human_radio.setEnabled(False)
self.first_human_radio.setEnabled(False)
self.first_ai_radio.setEnabled(False)
self.game_begin_callback(self._is_ai_first_go)
@pyqtSlot()
def _select_first_radio(self):
if self.sender().checkedId() == 1:
self._is_ai_first_go = False
else:
self._is_ai_first_go = True
@pyqtSlot()
    def _timer_operate(self):
        if self._player == -1:
            self._red_time += 1
        else:
            self._blue_time += 1
        time = self._red_time if self._player == -1 else self._blue_time
        m, s = divmod(time, 60)
        text = "{:02d}:{:02d}".format(m, s)
        if self._player == -1:
            self.red_time_label.setText(text)
        else:
            self.blue_time_label.setText(text)
    def _setup_buttons(self):
        begin_x = INTERVAL * 2
        begin_y = INTERVAL * 2
        half = CHESS_SIZE // 2  # Qt geometry APIs expect ints, so use floor division
        # Rows 0-1 hold pieces 1-12 (one side), rows 2-3 hold pieces 13-24 (the other)
        row_offsets = (0, INTERVAL, INTERVAL * 4, INTERVAL * 5)
        for i in range(24):
            row, col = divmod(i, 6)
            btn = ChessButton(self)
            btn.setup_view(row < 2)
            btn.setGeometry(begin_x + INTERVAL * col - half,
                            begin_y + row_offsets[row] - half,
                            CHESS_SIZE,
                            CHESS_SIZE)
            btn.setText(str(i + 1))
            btn.tag = i + 1
            btn.clicked.connect(self._click_btn)
            self.chess_list.append(btn)
    def paintEvent(self, event):
        painter = QPainter(self)
        painter.setPen(QColor(166, 66, 250))
        # Top-left arcs
        painter.drawArc(0, 0, LONG_RADIUS, LONG_RADIUS, 0, 270 * 16)
        painter.drawArc(INTERVAL, INTERVAL, SHORT_RADIUS, SHORT_RADIUS, 0, 270 * 16)
        # Bottom-left arcs
        painter.drawArc(0, INTERVAL * 5, LONG_RADIUS, LONG_RADIUS, 90 * 16, 270 * 16)
        painter.drawArc(INTERVAL, INTERVAL * 6, SHORT_RADIUS, SHORT_RADIUS, 90 * 16, 270 * 16)
        # Top-right arcs
        painter.drawArc(INTERVAL * 5, 0, LONG_RADIUS, LONG_RADIUS, -90 * 16, 270 * 16)
        painter.drawArc(INTERVAL * 6, INTERVAL, SHORT_RADIUS, SHORT_RADIUS, -90 * 16, 270 * 16)
        # Bottom-right arcs
        painter.drawArc(INTERVAL * 5, INTERVAL * 5, LONG_RADIUS, LONG_RADIUS, -180 * 16, 270 * 16)
        painter.drawArc(INTERVAL * 6, INTERVAL * 6, SHORT_RADIUS, SHORT_RADIUS, -180 * 16, 270 * 16)
        # Vertical grid lines
        painter.drawLine(INTERVAL * 2, INTERVAL * 2, INTERVAL * 2, INTERVAL * 7)
        painter.drawLine(INTERVAL * 3, INTERVAL * 2, INTERVAL * 3, INTERVAL * 7)
        painter.drawLine(INTERVAL * 4, INTERVAL * 2, INTERVAL * 4, INTERVAL * 7)
        painter.drawLine(INTERVAL * 5, INTERVAL * 2, INTERVAL * 5, INTERVAL * 7)
        painter.drawLine(INTERVAL * 6, INTERVAL * 2, INTERVAL * 6, INTERVAL * 7)
        painter.drawLine(INTERVAL * 7, INTERVAL * 2, INTERVAL * 7, INTERVAL * 7)
        # Horizontal grid lines
        painter.drawLine(INTERVAL * 2, INTERVAL * 2, INTERVAL * 7, INTERVAL * 2)
        painter.drawLine(INTERVAL * 2, INTERVAL * 3, INTERVAL * 7, INTERVAL * 3)
        painter.drawLine(INTERVAL * 2, INTERVAL * 4, INTERVAL * 7, INTERVAL * 4)
        painter.drawLine(INTERVAL * 2, INTERVAL * 5, INTERVAL * 7, INTERVAL * 5)
        painter.drawLine(INTERVAL * 2, INTERVAL * 6, INTERVAL * 7, INTERVAL * 6)
        painter.drawLine(INTERVAL * 2, INTERVAL * 7, INTERVAL * 7, INTERVAL * 7)
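# Usage sketch (assumes a running Qt application; the lambdas stand in for the
# controller callbacks this view expects):
# app = QApplication([])
# view = GameView()
# view.click_callback = lambda tag: print("chess clicked:", tag)
# view.target_click_callback = lambda x, y: print("target chosen:", x, y)
# view.show()
# app.exec_()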
| surakarta_client/application/view/game_view.py | 0.474631 | 0.069321 |
from PIL import Image
import coremltools as ct
import os
import numpy as np
import cv2
import json
from json import JSONEncoder
from pathlib import Path
from objectDetectionMetrics.BoundingBox import BoundingBox
from objectDetectionMetrics.BoundingBoxes import BoundingBoxes
from objectDetectionMetrics.Evaluator import *
from objectDetectionMetrics.utils import *
from argparse import ArgumentParser
class NumpyArrayEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return JSONEncoder.default(self, obj)
classLabels = [
"Person",
"Sneakers",
"Chair",
"Other Shoes",
"Hat",
"Car",
"Lamp",
"Glasses",
"Bottle",
"Desk",
"Cup",
"Street Lights",
"Cabinet-shelf",
"Handbag-Satchel",
"Bracelet",
"Plate",
"Picture-Frame",
"Helmet",
"Book",
"Gloves",
"Storage box",
"Boat",
"Leather Shoes",
"Flower",
"Bench",
"Potted Plant",
"Bowl-Basin",
"Flag",
"Pillow",
"Boots",
"Vase",
"Microphone",
"Necklace",
"Ring",
"SUV",
"Wine Glass",
"Belt",
"Monitor-TV",
"Backpack",
"Umbrella",
"Traffic Light",
"Speaker",
"Watch",
"Tie",
"Trash bin Can",
"Slippers",
"Bicycle",
"Stool",
"Barrel-bucket",
"Van",
"Couch",
"Sandals",
"Basket",
"Drum",
"Pen-Pencil",
"Bus",
"Wild Bird",
"High Heels",
"Motorcycle",
"Guitar",
"Carpet",
"Cell Phone",
"Bread",
"Camera",
"Canned",
"Truck",
"Traffic cone",
"Cymbal",
"Lifesaver",
"Towel",
"Stuffed Toy",
"Candle",
"Sailboat",
"Laptop",
"Awning",
"Bed",
"Faucet",
"Tent",
"Horse",
"Mirror",
"Power outlet",
"Sink",
"Apple",
"Air Conditioner",
"Knife",
"Hockey Stick",
"Paddle",
"Pickup Truck",
"Fork",
"Traffic Sign",
"Balloon",
"Tripod",
"Dog",
"Spoon",
"Clock",
"Pot",
"Cow",
"Cake",
"Dinning Table",
"Sheep",
"Hanger",
"Blackboard-Whiteboard",
"Napkin",
"Other Fish",
"Orange-Tangerine",
"Toiletry",
"Keyboard",
"Tomato",
"Lantern",
"Machinery Vehicle",
"Fan",
"Green Vegetables",
"Banana",
"Baseball Glove",
"Airplane",
"Mouse",
"Train",
"Pumpkin",
"Soccer",
"Skiboard",
"Luggage",
"Nightstand",
"Tea pot",
"Telephone",
"Trolley",
"Head Phone",
"Sports Car",
"Stop Sign",
"Dessert",
"Scooter",
"Stroller",
"Crane",
"Remote",
"Refrigerator",
"Oven",
"Lemon",
"Duck",
"Baseball Bat",
"Surveillance Camera",
"Cat",
"Jug",
"Broccoli",
"Piano",
"Pizza",
"Elephant",
"Skateboard",
"Surfboard",
"Gun",
"Skating and Skiing shoes",
"Gas stove",
"Donut",
"Bow Tie",
"Carrot",
"Toilet",
"Kite",
"Strawberry",
"Other Balls",
"Shovel",
"Pepper",
"Computer Box",
"Toilet Paper",
"Cleaning Products",
"Chopsticks",
"Microwave",
"Pigeon",
"Baseball",
"Cutting-chopping Board",
"Coffee Table",
"Side Table",
"Scissors",
"Marker",
"Pie",
"Ladder",
"Snowboard",
"Cookies",
"Radiator",
"Fire Hydrant",
"Basketball",
"Zebra",
"Grape",
"Giraffe",
"Potato",
"Sausage",
"Tricycle",
"Violin",
"Egg",
"Fire Extinguisher",
"Candy",
"Fire Truck",
"Billiards",
"Converter",
"Bathtub",
"Wheelchair",
"Golf Club",
"Briefcase",
"Cucumber",
"Cigar-Cigarette",
"Paint Brush",
"Pear",
"Heavy Truck",
"Hamburger",
"Extractor",
"Extension Cord",
"Tong",
"Tennis Racket",
"Folder",
"American Football",
"earphone",
"Mask",
"Kettle",
"Tennis",
"Ship",
"Swing",
"Coffee Machine",
"Slide",
"Carriage",
"Onion",
"Green beans",
"Projector",
"Frisbee",
"Washing Machine-Drying Machine",
"Chicken",
"Printer",
"Watermelon",
"Saxophone",
"Tissue",
"Toothbrush",
"Ice cream",
"Hot-air balloon",
"Cello",
"French Fries",
"Scale",
"Trophy",
"Cabbage",
"Hot dog",
"Blender",
"Peach",
"Rice",
"Wallet-Purse",
"Volleyball",
"Deer",
"Goose",
"Tape",
"Tablet",
"Cosmetics",
"Trumpet",
"Pineapple",
"Golf Ball",
"Ambulance",
"Parking meter",
"Mango",
"Key",
"Hurdle",
"Fishing Rod",
"Medal",
"Flute",
"Brush",
"Penguin",
"Megaphone",
"Corn",
"Lettuce",
"Garlic",
"Swan",
"Helicopter",
"Green Onion",
"Sandwich",
"Nuts",
"Speed Limit Sign",
"Induction Cooker",
"Broom",
"Trombone",
"Plum",
"Rickshaw",
"Goldfish",
"Kiwi fruit",
"Router-modem",
"Poker Card",
"Toaster",
"Shrimp",
"Sushi",
"Cheese",
"Notepaper",
"Cherry",
"Pliers",
"CD",
"Pasta",
"Hammer",
"Cue",
"Avocado",
"Hamimelon",
"Flask",
"Mushroom",
"Screwdriver",
"Soap",
"Recorder",
"Bear",
"Eggplant",
"Board Eraser",
"Coconut",
"Tape Measure-Ruler",
"Pig",
"Showerhead",
"Globe",
"Chips",
"Steak",
"Crosswalk Sign",
"Stapler",
"Camel",
"Formula 1",
"Pomegranate",
"Dishwasher",
"Crab",
"Hoverboard",
"Meat ball",
"Rice Cooker",
"Tuba",
"Calculator",
"Papaya",
"Antelope",
"Parrot",
"Seal",
"Butterfly",
"Dumbbell",
"Donkey",
"Lion",
"Urinal",
"Dolphin",
"Electric Drill",
"Hair Dryer",
"Egg tart",
"Jellyfish",
"Treadmill",
"Lighter",
"Grapefruit",
"Game board",
"Mop",
"Radish",
"Baozi",
"Target",
"French",
"Spring Rolls",
"Monkey",
"Rabbit",
"Pencil Case",
"Yak",
"Red Cabbage",
"Binoculars",
"Asparagus",
"Barbell",
"Scallop",
"Noddles",
"Comb",
"Dumpling",
"Oyster",
"Table Tennis paddle",
"Cosmetics Brush-Eyeliner Pencil",
"Chainsaw",
"Eraser",
"Lobster",
"Durian",
"Okra",
"Lipstick",
"Cosmetics Mirror",
"Curling",
"Table Tennis",
]
LABELS = {}
for i, name in enumerate(classLabels):
LABELS[str(i)] = name
IMAGE_FILE_SUFFIXES = (".jpeg", ".jpg")
MAX_IMAGES = 100
# Model config
IOU_THRESHOLD = 0.3
IOU_THRESHOLD_MODEL = 0.3
CONFIDENCE_THRESHOLD = 0.3
IMAGE_SIZE = 640
def main():
parser = ArgumentParser()
parser.add_argument(
"--model-input-path",
type=str,
dest="model_input_path",
default="output/models/yolov5-iOS.mlmodel",
help="path to coreml model",
)
parser.add_argument(
"--image-folder",
type=str,
dest="image_folder",
default="data/images",
help="path to image root folder",
)
parser.add_argument(
"--label-folder",
type=str,
dest="label_folder",
default="data/labels",
help="path to label root folder (folder needs to mirror directory structure of the image folder)",
)
parser.add_argument(
"--metrics_output-directory",
type=str,
dest="metrics_output_directory",
default="output/metrics",
help="path to metrics output folder (will be created if it does not exist)",
)
opt = parser.parse_args()
Path(opt.metrics_output_directory).mkdir(parents=True, exist_ok=True)
allBoundingBoxes = queryFolders(
opt.model_input_path,
opt.image_folder,
opt.label_folder,
opt.metrics_output_directory,
)
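    # Example invocation (hypothetical paths, matching the defaults above):
    #   python main.py --model-input-path output/models/yolov5-iOS.mlmodel \
    #       --image-folder data/images --label-folder data/labels \
    #       --metrics-output-directory output/metrics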
# Recursively scans all subdirectories and writes an evaluation for every image found in a directory and its children
def queryFolders(model, imageFolder, labelsFolder, outputDirectory):
allBoundingBoxes = BoundingBoxes()
for subImageFolder in os.scandir(imageFolder):
# Recursive call for subfolders
    if subImageFolder.is_dir():
        subBoundingBoxes = queryFolders(
            model,
            subImageFolder.path,
            f"{labelsFolder}/{subImageFolder.name}",
            outputDirectory,
        )
        # queryFolders returns None for subtrees with no images at all
        if subBoundingBoxes is not None:
            allBoundingBoxes.addBoundingBoxes(subBoundingBoxes)
boundingBoxes = analyseCurrentDir(model, imageFolder, labelsFolder, outputDirectory)
if boundingBoxes:
allBoundingBoxes.addBoundingBoxes(boundingBoxes)
# Check if directory and subdirectories contain any image at all
if not allBoundingBoxes:
return
metricsOutputFolder = imageFolder.replace("data", outputDirectory) + "/metrics"
Path(metricsOutputFolder).mkdir(parents=True, exist_ok=True)
print(f"Evaluate {imageFolder}")
evaluate(allBoundingBoxes, metricsOutputFolder)
return allBoundingBoxes
def analyseCurrentDir(model, imageFolder, labelsFolder, outputDirectory):
imageEntries = [
imageEntry
for imageEntry in os.scandir(imageFolder)
if imageEntry.name.endswith(IMAGE_FILE_SUFFIXES)
]
if not imageEntries:
return
    if not Path(labelsFolder).exists():
        print(f"Labels folder {labelsFolder} for {imageFolder} doesn't exist")
        return
detectionOutputFolder = imageFolder.replace("data", outputDirectory) + "/detections"
Path(detectionOutputFolder).mkdir(parents=True, exist_ok=True)
imageOutputFolder = imageFolder.replace("data", outputDirectory) + "/images"
Path(imageOutputFolder).mkdir(parents=True, exist_ok=True)
detectCoreML(model, imageEntries, detectionOutputFolder)
boundingBoxes = getBoundingBoxes(labelsFolder, detectionOutputFolder)
drawBoundingBox(imageEntries, boundingBoxes, imageOutputFolder)
return boundingBoxes
def evaluate(boundingBoxes, metricsOutputFolder):
try:
metricsList = Evaluator().PlotPrecisionRecallCurve(
boundingBoxes,
IOUThreshold=IOU_THRESHOLD,
method=MethodAveragePrecision.EveryPointInterpolation,
showAP=True,
showInterpolatedPrecision=True,
savePath=metricsOutputFolder,
showGraphic=False,
)
with open(f"{metricsOutputFolder}/metrics.json", "w") as metricsFile:
json.dump(metricsList, metricsFile, cls=NumpyArrayEncoder)
except Exception as e:
print(e)
def detectCoreML(modelPath, imageEntries, outputFolder):
model = ct.models.MLModel(modelPath, useCPUOnly=True)
    for imageEntry in imageEntries[:MAX_IMAGES]:  # process at most MAX_IMAGES images
        inputImage = Image.open(imageEntry.path).resize((IMAGE_SIZE, IMAGE_SIZE))
out_dict = model.predict(
{
"image": inputImage,
"iouThreshold": IOU_THRESHOLD_MODEL,
"confidenceThreshold": CONFIDENCE_THRESHOLD,
}
)
outFileName = Path(imageEntry.path).stem
outFilePath = f"{outputFolder}/{outFileName}.txt"
with open(outFilePath, "w") as outFile:
for coordinates, confidence in zip(
out_dict["coordinates"], out_dict["confidence"]
):
labelMax = confidence.argmax()
outFile.write(
"{:d} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f}\n".format(
labelMax,
coordinates[0],
coordinates[1],
coordinates[2],
coordinates[3],
confidence[labelMax],
)
)
print(f"Image {outFileName} predicted!")
# Convert validation and detection YOLO files into Bounding Box Objects
def getBoundingBoxes(valFolder, detectFolder):
boundingBoxes = BoundingBoxes()
addBoundingBoxes(boundingBoxes, valFolder, isGroundTruth=True)
addBoundingBoxes(boundingBoxes, detectFolder, isGroundTruth=False)
return boundingBoxes
# Convert label YOLO files into Bounding Box Objects
def addBoundingBoxes(boundingBoxes, labelFolder, isGroundTruth):
for labelFileEntry in os.scandir(labelFolder):
if not labelFileEntry.name.endswith(".txt"):
continue
if not Path(labelFileEntry.path).exists():
print(f"Missing label file {labelFileEntry.path}")
continue
imageName = Path(labelFileEntry.path).stem
with open(labelFileEntry.path, "r") as labelFile:
for labelLine in labelFile:
labelNumbers = labelLine.split()
# ignore empty lines
if len(labelNumbers) == 0:
continue
if len(labelNumbers) < 5:
print(
f"Warning: Not enough values in some line in {groundTruthFolder}/{groundTruthFileName}"
)
continue
if isGroundTruth:
bb = BoundingBox(
imageName,
LABELS[labelNumbers[0]],
float(labelNumbers[1]),
float(labelNumbers[2]),
float(labelNumbers[3]),
float(labelNumbers[4]),
CoordinatesType.Relative,
(IMAGE_SIZE, IMAGE_SIZE),
BBType.GroundTruth,
format=BBFormat.XYWH,
)
else:
bb = BoundingBox(
imageName,
LABELS[labelNumbers[0]],
float(labelNumbers[1]),
float(labelNumbers[2]),
float(labelNumbers[3]),
float(labelNumbers[4]),
CoordinatesType.Relative,
(IMAGE_SIZE, IMAGE_SIZE),
BBType.Detected,
float(labelNumbers[5]),
format=BBFormat.XYWH,
)
boundingBoxes.addBoundingBox(bb)
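# Draw ground-truth and detected boxes onto each image and save the annotated copy at its original resolution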
def drawBoundingBox(imageEntries, boundingBoxes, outputFolder):
maxProcess = MAX_IMAGES
for imageEntry in imageEntries:
maxProcess -= 1
if maxProcess < 0:  # process exactly MAX_IMAGES images, not one fewer
break
imageName = Path(imageEntry.path).stem
# Read image and resize to model image size
image = cv2.imread(imageEntry.path)
(originalHeight, originalWidth) = image.shape[:2]
image = cv2.resize(image, (IMAGE_SIZE, IMAGE_SIZE))
image = boundingBoxes.drawAllBoundingBoxes(image, imageName)
# Resize image back to its original size and write to file
image = cv2.resize(image, (originalWidth, originalHeight))
cv2.imwrite(f"{outputFolder}/{imageEntry.name}", image)
print(f"Image {imageName} boundingBoxes created successfully!") | src/coreml_metrics/main.py |
0.505615 | 0.371393 |
from sqlite3 import connect
from datetime import datetime
from event import Event
from datetime_functions import date_with_dots
class EventsStorage:
"""
Creates database with table 'races' to store info about events.
Manages and stores class Event instances
"""
def __init__(self, db_file):
self.connection = connect(db_file)
self.cursor = self.connection.cursor()
self.cursor.execute('''CREATE TABLE IF NOT EXISTS races (
name TEXT,
date TEXT,
place TEXT,
distance TEXT,
url TEXT UNIQUE
)'''
)
def __del__(self):
self.connection.commit()
self.connection.close()
def store(self, events):
"""
Inserts events into table 'races' in database
:param events: list of class Event instances
:return: None
"""
print('Inserting events into database')
insert = []
for e in events:
insert.append(e.to_tuple())
self.cursor.executemany('''INSERT OR REPLACE INTO races
VALUES (?, ?, ?, ?, ?)''', insert)
self.connection.commit()
def read(self, event_date=None):
"""
Selects events with date equal to event_date. If event_date is not
given, selects all events from the table 'races'
:param event_date: datetime.date instance
:return: list of class Event instances
"""
print('Selecting events from database')
if event_date is None:
self.cursor.execute('''SELECT name, date, place, distance, url
FROM races''')
else:
self.cursor.execute('''SELECT name, date, place, distance, url
FROM races WHERE date=?''', (event_date, ))
table = self.cursor.fetchall()
events = []
for row in table:
events.append(Event(row[0], datetime.strptime(row[1], '%Y-%m-%d'),
row[2], row[3], row[4]))
return events
def count(self):
"""
Counts events in database
:return: int, number of rows (events) in the table races
"""
self.cursor.execute('SELECT COUNT(*) FROM races')
return self.cursor.fetchone()[0]
def remove(self, event):
"""
Removes event from table 'races' in database
:param event: class Event instance to remove
:return: None
"""
print('Removing event from database: ' + event.name)
self.cursor.execute('DELETE FROM races WHERE url=?', (event.url, ))
self.connection.commit()
if __name__ == '__main__':
pass | src/eventsstorage.py | 0.632503 | 0.122812 |
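A minimal usage sketch of the storage class above. The Event constructor argument order mirrors read(); the module paths and Event accepting a datetime directly are assumptions:

from datetime import datetime
from event import Event                    # same import the storage module uses
from eventsstorage import EventsStorage    # module path is an assumption

storage = EventsStorage('races.db')        # creates the 'races' table if missing
race = Event('City Run', datetime(2024, 6, 1), 'Riga', '10K',
             'https://example.com/city-run')
storage.store([race])
print(storage.count())                     # -> 1
for event in storage.read('2024-06-01'):   # dates are stored as 'YYYY-MM-DD' text
    print(event.name)
storage.remove(race)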
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='delete_process_definition.proto',
package='process_definition',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x1f\x64\x65lete_process_definition.proto\x12\x12process_definition\"6\n\x1e\x44\x65leteProcessDefinitionRequest\x12\x14\n\x0c\x64\x65\x66initionId\x18\x01 \x01(\t\"5\n\x1f\x44\x65leteProcessDefinitionResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"\x9d\x01\n&DeleteProcessDefinitionResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x41\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x33.process_definition.DeleteProcessDefinitionResponseb\x06proto3')
)
_DELETEPROCESSDEFINITIONREQUEST = _descriptor.Descriptor(
name='DeleteProcessDefinitionRequest',
full_name='process_definition.DeleteProcessDefinitionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='definitionId', full_name='process_definition.DeleteProcessDefinitionRequest.definitionId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=55,
serialized_end=109,
)
_DELETEPROCESSDEFINITIONRESPONSE = _descriptor.Descriptor(
name='DeleteProcessDefinitionResponse',
full_name='process_definition.DeleteProcessDefinitionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='process_definition.DeleteProcessDefinitionResponse.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=111,
serialized_end=164,
)
_DELETEPROCESSDEFINITIONRESPONSEWRAPPER = _descriptor.Descriptor(
name='DeleteProcessDefinitionResponseWrapper',
full_name='process_definition.DeleteProcessDefinitionResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='process_definition.DeleteProcessDefinitionResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='process_definition.DeleteProcessDefinitionResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='process_definition.DeleteProcessDefinitionResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='process_definition.DeleteProcessDefinitionResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=167,
serialized_end=324,
)
_DELETEPROCESSDEFINITIONRESPONSEWRAPPER.fields_by_name['data'].message_type = _DELETEPROCESSDEFINITIONRESPONSE
DESCRIPTOR.message_types_by_name['DeleteProcessDefinitionRequest'] = _DELETEPROCESSDEFINITIONREQUEST
DESCRIPTOR.message_types_by_name['DeleteProcessDefinitionResponse'] = _DELETEPROCESSDEFINITIONRESPONSE
DESCRIPTOR.message_types_by_name['DeleteProcessDefinitionResponseWrapper'] = _DELETEPROCESSDEFINITIONRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DeleteProcessDefinitionRequest = _reflection.GeneratedProtocolMessageType('DeleteProcessDefinitionRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEPROCESSDEFINITIONREQUEST,
'__module__' : 'delete_process_definition_pb2'
# @@protoc_insertion_point(class_scope:process_definition.DeleteProcessDefinitionRequest)
})
_sym_db.RegisterMessage(DeleteProcessDefinitionRequest)
DeleteProcessDefinitionResponse = _reflection.GeneratedProtocolMessageType('DeleteProcessDefinitionResponse', (_message.Message,), {
'DESCRIPTOR' : _DELETEPROCESSDEFINITIONRESPONSE,
'__module__' : 'delete_process_definition_pb2'
# @@protoc_insertion_point(class_scope:process_definition.DeleteProcessDefinitionResponse)
})
_sym_db.RegisterMessage(DeleteProcessDefinitionResponse)
DeleteProcessDefinitionResponseWrapper = _reflection.GeneratedProtocolMessageType('DeleteProcessDefinitionResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _DELETEPROCESSDEFINITIONRESPONSEWRAPPER,
'__module__' : 'delete_process_definition_pb2'
# @@protoc_insertion_point(class_scope:process_definition.DeleteProcessDefinitionResponseWrapper)
})
_sym_db.RegisterMessage(DeleteProcessDefinitionResponseWrapper)
# @@protoc_insertion_point(module_scope) | flowable_service_sdk/api/process_definition/delete_process_definition_pb2.py |
0.189334 | 0.092237 |
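A quick round-trip sketch for the generated messages above, using the standard protobuf message API; the id values are placeholders:

from delete_process_definition_pb2 import (
    DeleteProcessDefinitionRequest,
    DeleteProcessDefinitionResponseWrapper,
)

req = DeleteProcessDefinitionRequest(definitionId='proc-def-42')
payload = req.SerializeToString()              # wire-format bytes
decoded = DeleteProcessDefinitionRequest.FromString(payload)
print(decoded.definitionId)                    # -> proc-def-42

wrapper = DeleteProcessDefinitionResponseWrapper(code=0)
wrapper.data.instanceId = 'inst-1'             # nested messages are assigned in place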
import json
import pickle
import numpy as np
#=======================================================================
PERIODIC_DICT = {'True': 1, 'False': 0}
#=======================================================================
def pickle_load(file_name):
with open(file_name, 'rb') as file:
return pickle.load(file)
def pickle_dump(dump_dict, file_name):
with open(file_name, 'wb') as file:
pickle.dump(dump_dict, file)
#=======================================================================
class ParserJSON(object):
def __init__(self, file_name = None):
self.file_name = file_name
def parse(self, file_name = None):
if file_name:
self.json = json.loads(open(file_name).read())
else:
self.json = json.loads(open(self.file_name).read())
self.param_dict = self.json
#=======================================================================
class VarDictParser(object):
def __init__(self, var_dicts):
# we need to sort variables by type
self.var_dicts = var_dicts
self.total_size = 0
self.complete_size = 0
# store all information about all variables
self._store_all_infos()
# learn types of each variable
self._store_variable_types()
# get infos broken up by variable type
self._store_type_specific_infos()
def _store_all_infos(self):
# get lists for storing all variables
for attr in ['var_sizes', 'var_names', 'var_lows', 'var_highs', 'var_types', 'var_options', 'var_keep_num', 'var_periodic', 'var_ranges']:
setattr(self, attr, [])
for attr in ['var_p_sizes', 'var_p_names', 'var_p_lows', 'var_p_highs', 'var_p_types', 'var_p_options', 'var_p_keep_num', 'var_p_periodic', 'var_p_ranges']:
setattr(self, attr, [])
for attr in ['var_e_sizes', 'var_e_names', 'var_e_lows', 'var_e_highs', 'var_e_types', 'var_e_options', 'var_e_begin', 'var_e_end', 'var_e_keep_num', 'var_e_periodic', 'var_e_ranges']:
setattr(self, attr, [])
# and store information
for var_dict in self.var_dicts:
var_name = list(var_dict)[0]
var_size = var_dict[var_name]['size']
self.total_size += var_size
self.var_names.append(var_name)
self.var_sizes.append(var_size)
if 'keep_num' in var_dict[var_name]:
self.var_keep_num.append(var_dict[var_name]['keep_num'])
else:
self.var_keep_num.append(var_dict[var_name]['size'])
if 'periodic' in var_dict[var_name]:
self.var_periodic.append(PERIODIC_DICT[var_dict[var_name]['periodic']])
else:
self.var_periodic.append(PERIODIC_DICT['False'])
self.var_types.append(var_dict[var_name]['type'])
if 'low' in var_dict[var_name].keys():
self.var_lows.append(float(var_dict[var_name]['low']))
self.var_highs.append(float(var_dict[var_name]['high']))
self.var_ranges.append(float(var_dict[var_name]['high']) - float(var_dict[var_name]['low']))
self.var_options.append('')
else:
self.var_lows.append(0.)
self.var_highs.append(1.)
self.var_options.append(var_dict[var_name]['options'])
self.var_p_names.extend([var_name for i in range(var_size)])
self.var_p_sizes.extend([var_size for i in range(var_size)])
if 'keep_num' in var_dict[var_name]:
self.var_p_keep_num.extend([var_dict[var_name]['keep_num'] for i in range(var_size)])
else:
self.var_p_keep_num.extend([var_dict[var_name]['size'] for i in range(var_size)])
if 'periodic' in var_dict[var_name]:
self.var_p_periodic.extend([PERIODIC_DICT[var_dict[var_name]['periodic']] for i in range(var_size)])
else:
self.var_p_periodic.extend([PERIODIC_DICT['False'] for i in range(var_size)])
self.var_p_types.extend([var_dict[var_name]['type'] for i in range(var_size)])
if 'low' in var_dict[var_name].keys():
self.var_p_lows.extend([float(var_dict[var_name]['low']) for i in range(var_size)])
self.var_p_highs.extend([float(var_dict[var_name]['high']) for i in range(var_size)])
self.var_p_ranges.extend([float(var_dict[var_name]['high']) - float(var_dict[var_name]['low']) for i in range(var_size)])
self.var_p_options.extend(['' for i in range(var_size)])
else:
self.var_p_lows.extend([0. for i in range(var_size)])
self.var_p_highs.extend([1. for i in range(var_size)])
self.var_p_options.extend([var_dict[var_name]['options'] for i in range(var_size)])
if 'options' in var_dict[var_name].keys():
var_size *= len(var_dict[var_name]['options'])
begin_index = self.complete_size
self.complete_size += var_size
end_index = self.complete_size
self.var_e_names.extend([var_name for i in range(var_size)])
self.var_e_sizes.extend([var_size for i in range(var_size)])
if 'keep_num' in var_dict[var_name]:
self.var_e_keep_num.extend([var_dict[var_name]['keep_num'] for i in range(var_size)])
else:
self.var_e_keep_num.extend([var_dict[var_name]['size'] for i in range(var_size)])
if 'periodic' in var_dict[var_name]:
self.var_e_periodic.extend([PERIODIC_DICT[var_dict[var_name]['periodic']] for i in range(var_size)])
else:
self.var_e_periodic.extend([PERIODIC_DICT['False'] for i in range(var_size)])
self.var_e_types.extend([var_dict[var_name]['type'] for i in range(var_size)])
self.var_e_begin.extend([begin_index for i in range(var_size)])
self.var_e_end.extend([end_index for i in range(var_size)])
if 'low' in var_dict[var_name].keys():
self.var_e_lows.extend([float(var_dict[var_name]['low']) for i in range(var_size)])
self.var_e_highs.extend([float(var_dict[var_name]['high']) for i in range(var_size)])
self.var_e_ranges.extend([float(var_dict[var_name]['high']) - float(var_dict[var_name]['low']) for i in range(var_size)])
self.var_e_options.extend(['' for i in range(var_size)])
else:
self.var_e_lows.extend([0. for i in range(var_size)])
self.var_e_highs.extend([1. for i in range(var_size)])
self.var_e_options.extend([var_dict[var_name]['options'] for i in range(var_size)])
# need to convert everything into numpy arrays
for attr in ['var_sizes', 'var_names', 'var_lows', 'var_highs', 'var_types', 'var_options', 'var_periodic', 'var_ranges']:
setattr(self, attr, np.array(getattr(self, attr)))
for attr in ['var_p_sizes', 'var_p_names', 'var_p_lows', 'var_p_highs', 'var_p_types', 'var_p_options', 'var_p_periodic', 'var_p_ranges']:
setattr(self, attr, np.array(getattr(self, attr)))
for attr in ['var_e_sizes', 'var_e_names', 'var_e_lows', 'var_e_highs', 'var_e_types', 'var_e_options', 'var_e_periodic', 'var_e_ranges']:
setattr(self, attr, np.array(getattr(self, attr)))
# astype() returns a new array, so the result must be reassigned
self.var_periodic = self.var_periodic.astype(np.int32)
self.var_p_periodic = self.var_p_periodic.astype(np.int32)
self.var_e_periodic = self.var_e_periodic.astype(np.int32)
# get the ranges
self.var_ranges = self.var_highs - self.var_lows
self.var_p_ranges = self.var_p_highs - self.var_p_lows
self.var_e_ranges = self.var_e_highs - self.var_e_lows
# and store everything in dictionaries, just in case
self.var_infos = {'var_names': self.var_names, 'var_sizes': self.var_sizes, 'var_types': self.var_types,
'var_lows': self.var_lows, 'var_highs': self.var_highs, 'var_options': self.var_options}
self.var_p_infos = {'var_p_names': self.var_p_names, 'var_p_sizes': self.var_p_sizes, 'var_p_types': self.var_p_types,
'var_p_lows': self.var_p_lows, 'var_p_highs': self.var_p_highs, 'var_p_options': self.var_p_options,}
def _store_variable_types(self):
# now we need to know the variable type for each entry
for attr in ['_floats', '_ints', '_cats']:
setattr(self, attr, np.array([False for i in range(self.total_size)]))
self.var_p_type_indicators = np.empty(self.total_size)
for var_index, var_type in enumerate(self.var_p_types):
if var_type == 'float':
self._floats[var_index] = True
self.var_p_type_indicators[var_index] = 0
elif var_type == 'integer':
self._ints[var_index] = True
self.var_p_type_indicators[var_index] = 1
elif var_type == 'categorical':
self._cats[var_index] = True
self.var_p_type_indicators[var_index] = 2
else:
raise NotImplementedError()
def _store_type_specific_infos(self):
float_dict = {}
for attr, values in self.var_p_infos.items():
float_dict[attr] = values[self._floats]
self.var_p_infos_floats = float_dict
int_dict = {}
for attr, values in self.var_p_infos.items():
int_dict[attr] = values[self._ints]
self.var_p_infos_ints = int_dict
cat_dict = {}
for attr, values in self.var_p_infos.items():
cat_dict[attr] = values[self._cats]
self.var_p_infos_cats = cat_dict
#=======================================================================
class ObsDictParser(object):
def __init__(self, obs_dicts):
for att in ['loss_names', 'loss_hierarchies', 'loss_types', 'loss_tolerances']:
setattr(self, att, [])
# we need to get information sorted by hierarchy
for obs_dict in obs_dicts:
name = list(obs_dict.keys())[0]
self.loss_names.append(name)
self.loss_hierarchies.append(obs_dict[name]['hierarchy'])
self.loss_types.append(obs_dict[name]['type'])
self.loss_tolerances.append(obs_dict[name]['tolerance'])
sort_indices = np.argsort(self.loss_hierarchies)
for att in ['loss_names', 'loss_hierarchies', 'loss_types', 'loss_tolerances']:
att_list = getattr(self, att)
setattr(self, att, np.array(att_list)[sort_indices])
# for att in ['loss_names', 'loss_hierarchies', 'loss_types', 'loss_tolerances']:
# print(getattr(self, att))
# quit()
#=======================================================================
if __name__ == '__main__':
parser = ParserJSON('config.txt')
parser.parse()
print(parser.param_dict)
quit() | ParamGenerator/Phoenics/Utils/utils.py |
0.072423 | 0.052887 |
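A small sketch of the variable-specification format VarDictParser expects, inferred from the keys it reads ('size', 'type', 'low'/'high' or 'options', plus optional 'periodic' and 'keep_num'); the parameter names are made up:

var_dicts = [
    {'temperature': {'size': 2, 'type': 'float',
                     'low': 20.0, 'high': 80.0, 'periodic': 'False'}},
    {'solvent': {'size': 1, 'type': 'categorical',
                 'options': ['water', 'ethanol']}},
]
parser = VarDictParser(var_dicts)
print(parser.total_size)    # 3: two float entries plus one categorical entry
print(parser.var_p_types)   # per-entry types in the flattened representation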
import os
import json
import sys
import boto3
import fire
# TODO: Accept command line args
SAVE_TO_FILE_DIRECTORY = './data'
SAVE_TO_FILE_PATH = '{}/ecr__enum_repos_data.json'.format(SAVE_TO_FILE_DIRECTORY)
module_info = {
'name': 'ecr__enum_repos',
'author': '<NAME> of Rhino Security Labs',
'category': 'ENUM',
'one_liner': 'Enumerates ECR repositories.',
'description': 'Enumerates ECR repositories.',
'services': ['ECR'],
'prerequisite_modules': [],
'external_dependencies': [],
'arguments_to_autocomplete': [],
'data_saved': SAVE_TO_FILE_PATH
}
def get_aws_session(profile, region):
return boto3.Session(profile_name=profile, region_name=region)
def get_ecr_repos(ecr_client):
data = []
nextToken = None
try:
while True:
if nextToken is None:
response = ecr_client.describe_repositories(maxResults=2)
else:
response = ecr_client.describe_repositories(maxResults=2, nextToken=nextToken)
if response.get('repositories'):
data.extend(response.get('repositories'))
elif len(data) == 0:
break
if response.get('nextToken'):
nextToken = response['nextToken']
else:
break
except Exception as e:
print(e, file=sys.stderr)
return data
def get_ecr_repo_image_tags(ecr_client, repository_name, tag_status):
data = None
try:
response = ecr_client.list_images(repositoryName=repository_name, filter={
'tagStatus': tag_status
})
if not response['imageIds']:
pass
else:
data = response['imageIds']
except Exception as e:
print(e, file=sys.stderr)
return data
def append_image_tags_to_repo(ecr_client, ecr_repos):
for repo in ecr_repos:
image_ids = get_ecr_repo_image_tags(ecr_client, repo['repositoryName'], 'ANY')
if image_ids is not None:
repo.update({'image_ids': image_ids})
def save_to_file(data):
os.makedirs(SAVE_TO_FILE_DIRECTORY, exist_ok=True)
with open(SAVE_TO_FILE_PATH, 'w+') as json_file:
json.dump(data, json_file, indent=4, default=str)
def enum_repos(profile, aws_regions, data):
total = 0
try:
for region in aws_regions:
aws_session = get_aws_session(profile, region)
ecr_client = aws_session.client('ecr')
ecr_repos = get_ecr_repos(ecr_client)
if len(ecr_repos) != 0:
data['payload']['aws_regions'].append(region)
data['payload']['repositories_by_region'].update({
region: ecr_repos
})
append_image_tags_to_repo(ecr_client, ecr_repos)
count = len(ecr_repos)
out = "Found {} repositories in {}".format(count, region)
print(out)
total += count
except Exception as e:
print(e, file=sys.stderr)
data['count'] = total
def main(args):
data = {
'count': 0,
'payload': {
'aws_regions': [],
'repositories_by_region': {}
}
}
enum_repos(args.get('aws_cli_profile'), args.get('aws_regions'), data)
save_to_file(data)
return data
def summary(data):
out = ''
out += 'Total {} ECR Repositories Enumerated\n'.format(data['count'])
out += 'ECR resources saved under {}.\n'.format(module_info['data_saved'])
return out
#python main.py cloudgoat "['us-east-1','us-west-2']"
def set_args(aws_cli_profile, aws_regions):
print(aws_regions)
if type(aws_regions) is list:
args = {
'aws_cli_profile': aws_cli_profile,  # use the profile passed on the command line rather than a hard-coded one
'aws_regions': aws_regions
}
else:
print('Regions must be a list.\n\tExample list format:\n\t\tpython main.py <aws-profile> \"[\'us-east-1\',\'us-west-2\']\"', file=sys.stderr)
sys.exit(1)
return args
if __name__ == "__main__":
print('Running module {}...'.format(module_info['name']))
args = fire.Fire(set_args)
data = main(args)
if data is not None:
summary = summary(data)
if len(summary) > 1000:
raise ValueError('The {} module\'s summary is too long ({} characters). Reduce it to 1000 characters or fewer.'.format(module_info['name'], len(summary)))
if not isinstance(summary, str):
raise TypeError(' The {} module\'s summary is {}-type instead of str. Make summary return a string.'.format(module_info['name'], type(summary)))
print('RESULT:')
print(json.dumps(data, indent=4, default=str))
print('{} completed.\n'.format(module_info['name']))
print('MODULE SUMMARY:\n\n{}\n'.format(summary.strip('\n'))) | modules/ecr__enum_repos/main.py | 0.147279 | 0.088544 |
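The nextToken loop in get_ecr_repos can also be written with boto3's built-in paginator, which does the token bookkeeping; a sketch assuming the same profile and region conventions as above:

import boto3

session = boto3.Session(profile_name='cloudgoat', region_name='us-east-1')
ecr = session.client('ecr')

repos = []
for page in ecr.get_paginator('describe_repositories').paginate(
        PaginationConfig={'PageSize': 2}):
    repos.extend(page['repositories'])
print('Found {} repositories'.format(len(repos)))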
import re
from jwt import PyJWTError
from tornado.httputil import HTTPServerRequest
from kairon.shared.account.processor import AccountProcessor
from kairon.shared.authorization.processor import IntegrationProcessor
from kairon.shared.data.constant import TOKEN_TYPE
from kairon.shared.models import User
from kairon.shared.tornado.exception import ServiceHandlerException
from kairon.shared.utils import Utility
from typing import Text
Utility.load_environment()
class TornadoAuthenticate:
@staticmethod
def get_token(request: HTTPServerRequest):
authorization = request.headers.get('Authorization')
token = ""
if authorization:
scheme, _, token = authorization.partition(" ")
return token.strip()
@staticmethod
def get_user_from_token(
token: Text, request: HTTPServerRequest, **kwargs
):
"""
validates jwt token
:param token: jwt token
:param request: http request object
:return: dict of user details
"""
credentials_exception = ServiceHandlerException("Could not validate credentials", 401, {"WWW-Authenticate": "Bearer"})
try:
payload = Utility.decode_limited_access_token(token)
username: str = payload.get("sub")
TornadoAuthenticate.validate_limited_access_token(request, payload.get("access-limit"))
if username is None:
raise credentials_exception
except PyJWTError:
raise credentials_exception
user = AccountProcessor.get_user_details(username)
if user is None:
raise credentials_exception
user_model = User(**user)
if payload.get("type") != TOKEN_TYPE.LOGIN.value:
TornadoAuthenticate.validate_bot_request(kwargs.get('bot'), payload.get('bot'))
if payload.get("type") == TOKEN_TYPE.INTEGRATION.value:
TornadoAuthenticate.validate_integration_token(payload)
alias_user = request.headers.get("X-USER")
if Utility.check_empty_string(alias_user) and payload.get("type") == TOKEN_TYPE.INTEGRATION.value:
raise ServiceHandlerException("Alias user missing for integration", 401)
alias_user = alias_user or username
user_model.alias_user = alias_user
user_model.is_integration_user = True
user_model.role = payload.get('role')
return user_model
@staticmethod
def get_current_user(
request: HTTPServerRequest, **kwargs
):
"""
validates the jwt token extracted from the request's Authorization header
:param request: http request object
:return: dict of user details
"""
token = TornadoAuthenticate.get_token(request)
user = TornadoAuthenticate.get_user_from_token(token, request, **kwargs)
return user
@staticmethod
def get_current_user_and_bot(
request: HTTPServerRequest, **kwargs
):
user = TornadoAuthenticate.get_current_user(request, **kwargs)
bot_id = kwargs.get('bot')
if Utility.check_empty_string(bot_id):
raise ServiceHandlerException("Bot is required", 422, {"WWW-Authenticate": "Bearer"})
if not user.is_integration_user:
AccountProcessor.fetch_role_for_user(user.email, bot_id)
bot = AccountProcessor.get_bot(bot_id)
if not bot["status"]:
raise ServiceHandlerException("Inactive Bot Please contact system admin!", 422, {"WWW-Authenticate": "Bearer"})
user.active_bot = bot_id
return user
@staticmethod
def get_current_user_and_bot_for_channel(
token: Text, bot: Text, request: HTTPServerRequest
):
user = TornadoAuthenticate.get_user_from_token(token, request)
if Utility.check_empty_string(bot):
raise ServiceHandlerException("Bot is required", 422, {"WWW-Authenticate": "Bearer"})
AccountProcessor.fetch_role_for_user(user.email, bot)
bot = AccountProcessor.get_bot(bot)
if not bot["status"]:
raise ServiceHandlerException("Inactive Bot Please contact system admin!", 422, {"WWW-Authenticate": "Bearer"})
user.active_bot = bot
return user
@staticmethod
def validate_limited_access_token(request: HTTPServerRequest, access_limit: list):
if not access_limit:
return
requested_endpoint = request.uri
matches = any(re.match(allowed_endpoint, requested_endpoint) for allowed_endpoint in access_limit)
if not matches:
raise ServiceHandlerException('Access denied for this endpoint', 401)
@staticmethod
def validate_integration_token(payload: dict):
"""
Validates whether integration token with this payload is active.
:param payload: Auth token claims dict.
"""
exception = ServiceHandlerException('Access to bot is denied', 401)
name = payload.get('name')
bot = payload.get('bot')
user = payload.get('sub')
iat = payload.get('iat')
role = payload.get('role')
try:
IntegrationProcessor.verify_integration_token(name, bot, user, iat, role)
except Exception:
raise exception
@staticmethod
def validate_bot_request(bot_in_request_path: str, bot_in_token: str):
"""
Validates that the bot being accessed is the same bot for which the integration token was generated.
:param bot_in_request_path: bot for which the request was made.
:param bot_in_token: bot which is present in auth token claims.
"""
if not Utility.check_empty_string(bot_in_request_path) and bot_in_request_path != bot_in_token:
raise ServiceHandlerException('Access to bot is denied', 401) | kairon/shared/tornado/auth.py | 0.665954 | 0.095223 |
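Utility.decode_limited_access_token is kairon's own wrapper; judging by the except PyJWTError branch above, it presumably wraps a PyJWT decode. A minimal sketch, assuming an HS256 token and a placeholder secret:

```python
import jwt  # PyJWT

SECRET = "change-me"  # hypothetical signing key

def decode_token(token: str) -> dict:
    # raises a jwt.PyJWTError subclass on a bad signature or expired token,
    # which the caller above converts into a 401 response
    return jwt.decode(token, SECRET, algorithms=["HS256"])
```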
import argparse
class TestGroup:
def __init__(self, name, parent = None):
self.parent = parent
self.name = name
self.testGroups = {}
self.testCases = {}
if parent:
assert name not in parent.testGroups
parent.testGroups[name] = self
def getName (self):
return self.name
def getPath (self):
if self.parent:
return self.parent.getPath() + "." + self.name
else:
return self.name
def hasGroup(self, groupName):
return groupName in self.testGroups
def getGroup(self, groupName):
return self.testGroups[groupName]
def hasTest(self, testName):
return testName in self.testCases
def getTest(self, testName):
return self.testCases[testName]
def hasTestCases(self):
return len(self.testCases) != 0
def hasTestGroups(self):
return len(self.testGroups) != 0
def getTestCases(self):
return self.testCases.values()
def getTestGroups(self):
return self.testGroups.values()
class TestCase:
def __init__(self, name, parent):
self.name = name
self.parent = parent
assert name not in self.parent.testCases
self.parent.testCases[name] = self
def getPath (self):
return self.parent.getPath() + "." + self.name
def getName(self):
return self.name
def addGroupToHierarchy(rootGroup, path):
pathComponents = path.split(".")
currentGroup = rootGroup
assert pathComponents[0] == rootGroup.getName()
for i in range(1, len(pathComponents)):
component = pathComponents[i]
if currentGroup.hasGroup(component):
currentGroup = currentGroup.getGroup(component)
else:
currentGroup = TestGroup(component, parent=currentGroup)
def addTestToHierarchy(rootGroup, path):
pathComponents = path.split(".")
currentGroup = rootGroup
assert pathComponents[0] == rootGroup.getName()
for i in range(1, len(pathComponents)):
component = pathComponents[i]
if i == len(pathComponents) - 1:
TestCase(component, currentGroup)
else:
if currentGroup.hasGroup(component):
currentGroup = currentGroup.getGroup(component)
else:
currentGroup = TestGroup(component, parent=currentGroup)
def loadTestHierarchy (input):
line = input.readline()
rootGroup = None
if line.startswith("GROUP: "):
groupName = line[len("GROUP: "):-1]
rootGroup = TestGroup(groupName)
else:
assert False
for line in input:
if line.startswith("GROUP: "):
groupPath = line[len("GROUP: "):-1];
addGroupToHierarchy(rootGroup, groupPath)
elif line.startswith("TEST: "):
testPath = line[len("TEST: "):-1]
addTestToHierarchy(rootGroup, testPath)
else:
assert False
return rootGroup
def hasFilteredCases(group, includeTests):
for child in group.getTestCases():
if child.getPath() in includeTests:
return True
for child in group.getTestGroups():
if hasFilteredCases(child, includeTests):
return True
return False
def addFilteredTest(parent, group, includeTests):
for child in group.getTestGroups():
if hasFilteredCases(child, includeTests):
newChild = TestGroup(child.getName(), parent)
addFilteredTest(newChild, child, includeTests)
for child in group.getTestCases():
if child.getPath() in includeTests:
TestCase(child.getName(), parent)
def filterTests(includeTests, group):
root = TestGroup(group.getName())
addFilteredTest(root, group, includeTests)
return root
def writeAndroidCTSTest(test, output):
output.write('<Test name="%s" />\n' % test.getName())
def writeAndroidCTSTestCase(group, output):
assert group.hasTestCases()
assert not group.hasTestGroups()
output.write('<TestCase name="%s">\n' % group.getName())
for testCase in group.getTestCases():
writeAndroidCTSTest(testCase, output)
output.write('</TestCase>\n')
def writeAndroidCTSTestSuite(group, output):
output.write('<TestSuite name="%s">\n' % group.getName())
for childGroup in group.getTestGroups():
if childGroup.hasTestCases():
assert not childGroup.hasTestGroups()
writeAndroidCTSTestCase(childGroup, output)
elif childGroup.hasTestGroups():
writeAndroidCTSTestSuite(childGroup, output)
# \note Skips groups without testcases or child groups
output.write('</TestSuite>\n')
def writeAndroidCTSFile(rootGroup, output, mustpass, name="dEQP-GLES3", appPackageName="com.drawelements.deqp.gles3"):
output.write('<?xml version="1.0" encoding="UTF-8"?>\n')
output.write('<TestPackage name="%s" appPackageName="%s" testType="deqpTest">\n' % (name, appPackageName))
writeAndroidCTSTestSuite(filterTests(mustpass, rootGroup), output)
output.write('</TestPackage>\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('input', type=argparse.FileType('r'), help="Input dEQP test hierarchy in txt format.")
parser.add_argument('output', type=argparse.FileType('w'), help="Output file for Android CTS test file.")
parser.add_argument('--name', dest="name", type=str, required=True, help="Name of the test package")
parser.add_argument('--package', dest="package", type=str, required=True, help="Name of the app package")
parser.add_argument('--must-pass', dest="mustpass", type=argparse.FileType('r'), required=True, help="Must pass file")
args = parser.parse_args()
rootGroup = loadTestHierarchy(args.input)
writeAndroidCTSFile(rootGroup, args.output, name=args.name, appPackageName=args.package, mustpass=set(map(lambda x : x.rstrip(), args.mustpass.readlines()))) | android/scripts/GenAndroidCTSXML.py | 0.257672 | 0.425009 |
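A quick sanity check of the loader and writer above, using a hypothetical two-line hierarchy fed through io.StringIO instead of real dEQP output:

```python
import io

sample = "GROUP: dEQP-GLES3\nGROUP: dEQP-GLES3.info\nTEST: dEQP-GLES3.info.version\n"
root = loadTestHierarchy(io.StringIO(sample))
out = io.StringIO()
writeAndroidCTSFile(root, out, mustpass={"dEQP-GLES3.info.version"})
print(out.getvalue())  # <TestPackage ...> wrapping one TestSuite/TestCase/Test
```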
from bs4 import BeautifulSoup
from covidvu.pipeline.vujson import SITE_DATA
from covidvu.pipeline.vuupdate import SCRAPED_US_DATA
from covidvu.pipeline.vuupdate import SCRAPED_WORLD_DATA
import copy
import csv
import os
# --- constants ---
TABLES_FILES = {
'LOCATION': {
'fileCSV' : SCRAPED_WORLD_DATA,
'fileHTML': None,
'ignoreRows': 6,
'tableHTML' : None,
},
'UNITED STATES': {
'fileCSV' : SCRAPED_US_DATA,
'fileHTML': None,
'ignoreRows': 5,
'tableHTML' : None,
},
'AUSTRALIA': {
'fileCSV' : None,
'fileHTML': None,
'tableHTML' : None,
},
'MAINLAND CHINA': {
'fileCSV' : None,
'fileHTML': None,
'tableHTML' : None,
},
'CANADA': {
'fileCSV' : None,
'fileHTML': None,
'tableHTML' : None,
},
'MUNDO HISPANO': {
'fileCSV' : None,
'fileHTML': None,
'tableHTML' : None,
},
}
# +++ functions +++
def detectHTMLTablesRegions(dataLake = SITE_DATA):
"""
Returns a dictionary of file names like:
{
'Global': 'table-00.html',
'US' " 'table-01.html',
}
"""
prefix = 'table-'
filesList = [ os.path.join(dataLake, fileName) for fileName in os.listdir(dataLake) if prefix in fileName and '.swp' not in fileName ]
tablesFiles = copy.deepcopy(TABLES_FILES)
for fileName in filesList:
table = BeautifulSoup(open(fileName).read(), 'html.parser')
for row in table.find_all('tr'):
column = row.find('td')
if not column:
continue
text = column.text.strip().upper()
if text in tablesFiles:
tablesFiles[text]['fileHTML'] = fileName
break
return tablesFiles
def _generateCSVTo(targetFileName, dataSource = None, ignoreRows = 6):
"""
targetFileName ::= where to write the CSV
dataSource ::= the scraped HTML file generated by pyavka.sh
ignoreRows ::= heuristic for skipping the first row headers in the
dataSource.
"""
rowCount = 0
rows = list()
soup = BeautifulSoup(open(dataSource).read(), 'html.parser')
table = soup.find('table')
print('processing %s...' % dataSource)
for tableRow in table.find_all('tr'):
rowCount += 1
if rowCount > ignoreRows:
row = list()
for column in tableRow.find_all('td'):
row.append(column.text.replace(',', '')) # number format comma
if not row or not row[0]:
continue # skip to next record
rows.append(row[0:7])
with open(targetFileName, 'w') as outputFile:
csv.writer(outputFile, delimiter = '\t').writerows(rows)
print('generated %s' % targetFileName)
def processHTML2CSV(dataLake = SITE_DATA):
# TODO: This whole thing needs to be cleaned up later.
for _, tableSpec in detectHTMLTablesRegions(dataLake).items():
if tableSpec['fileCSV']:
_generateCSVTo(tableSpec['fileCSV'], tableSpec['fileHTML'], tableSpec['ignoreRows'])
def _main():
processHTML2CSV()
# --- main ---
if '__main__' == __name__:
_main() | work/covidvu/pipeline/pyavka.py | 0.339718 | 0.087759 |
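The scraping core of _generateCSVTo, reduced to a self-contained snippet that shows the tr/td walk and the thousands-separator stripping:

```python
from bs4 import BeautifulSoup

html = "<table><tr><td>US</td><td>1,234</td></tr></table>"
table = BeautifulSoup(html, "html.parser").find("table")
for tr in table.find_all("tr"):
    print([td.text.replace(",", "") for td in tr.find_all("td")])  # ['US', '1234']
```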
from PyQt5.QtCore import QVariant
import math
from qgis.core import QgsVectorLayer, QgsExpression, QgsExpressionContext, QgsExpressionContextUtils, QgsField
def get_segment_info(datalayer):
"""
Read attributes from gps segment
:param datalayer: gps segment to be read
:return: start feature id,
end feature id,
median speed,
median distance,
total distance,
total duration
"""
exp = QgsExpression('array('
'minimum("fid"),'
'maximum("fid"),'
'median("speed"),'
'median("distance"),'
'sum("distance"),'
'sum("duration"))')
context = QgsExpressionContext()
context.appendScopes(QgsExpressionContextUtils.globalProjectLayerScopes(datalayer))
first_feature, last_feature, median_speed, median_distance, total_dist, total_time = exp.evaluate(context)
return int(first_feature), int(last_feature), median_speed, median_distance, total_dist, total_time
def break_likelihood(datalayer, feature, median_speed):
"""
Calculate break_likelihood for a point based on point speed & angle between previous & next points
:param datalayer: gps segment
:param feature: gps point id to check
:param median_speed: median speed for gps segment
:return: category_break: High/Medium/Low break likelihood for point
category_speed: High/Medium/Low point speed
category_angle: Wide/Narrow point angle
line_direction: Quadrant the direction of travel is heading
"""
prevfeature = datalayer.getFeature(feature - 1)
feature = datalayer.getFeature(feature)
a1 = prevfeature.geometry().angleAtVertex(0) * 180 / math.pi
a2 = feature.geometry().angleAtVertex(0) * 180 / math.pi
speed = feature.attribute('speed')
#Set angle = 180 for first point in segment
try:
if feature["Segment No"] == prevfeature["Segment No"]:
angle = abs(180 - abs(a1 - a2))
else:
angle = 180
except Exception:
angle = 180
if speed > 10:
category_speed = 'High'
elif speed <= median_speed / 2:
category_speed = 'Zero'
else:
category_speed = 'Low'
if angle > 90:
category_angle = 'Wide'
if category_speed == 'Zero' or category_speed == 'High':
category_break = 'Medium'
else:
category_break = 'Low'
else:
category_angle = 'Narrow'
if category_speed == 'Low' or category_speed == 'Zero':
category_break = 'High'
else:
category_break = 'Medium'
if 0 <= a2 < 90:
line_direction = 1
elif 90 <= a2 < 180:
line_direction = 2
elif 180 <= a2 < 270:
line_direction = 3
else:
line_direction = 4
return category_break, category_speed, category_angle, line_direction
def rangequery(feature, datalayer, median_speed, median_distance):
"""
Finds all features within 10 minutes of given point which are under median distance away,
plus the previous point if speed > 2 * median speed
:param feature: point id for epicentre of search
:param datalayer: gps segment to check
:param median_speed: median segment point speed
:param median_distance: median distance between points in segment
:return: n.keys(): point ids of features in range
extreme_speed: if point list contains exceptionally fast or slow points
sline: if no gaps exist in ids of points found
"""
featureID = feature['fid']
extreme_speed = False
sline = False
a = feature["a_time"]
#com = coordinates of centre of mass
com = (feature.geometry().asPolyline()[0])
n = dict()
#QGIS expression to search layer for points
expression = 'abs(second(to_datetime(\"a_time\") - to_datetime(\'' + a + '\'))) < 600 and ' \
'((distance(transform(start_point($geometry) , \'EPSG:4326\',\'EPSG:27700\'), ' + \
'transform(make_point(\'' + str(com.x()) + '\',\'' + str(com.y()) + '\'), ' \
'\'EPSG:4326\', \'EPSG:27700\'))<=' + str(median_distance) + ') or ' \
'(\"fid\" = ' + str(featureID - 1) + 'and \"speed\" > ' + str(float(median_speed * 2)) + '))'
datalayer.selectByExpression(expression)
for feat in datalayer.selectedFeatures():
p = feat['fid']
n[p] = True
if feat["speed"] > float(median_speed * 2) or feat["speed"] < 0.01:
n[p + 1] = True
extreme_speed = True
if len(n) == (max(n) - min(n)) + 1:
sline = True
return list(n.keys()), extreme_speed, sline
class BreakFinder:
"""
Class to find valid breakpoints in gps track
"""
def __init__(self):
pass
def find_breaks(self, data_path, point_list=None):
"""
Method to loop over points in gps track and check for valid breakpoints
:param data_path: gpkg file containing gps track
:param point_list: optional parameter to specify range of points to check
:return: updated gpkg file with OnBreak = 1 if point is breakpoint
"""
datalayer = QgsVectorLayer(data_path, 'All data', 'ogr')
selectedFeats = list()
plist = point_list
first, last, median_speed, median_distance, total_dist, total_time = get_segment_info(datalayer)
#Ignore tracks with under 250m or 2.5 min of travel, or high median speed (non walking)
if total_dist < 250 or total_time < 150 or median_speed > 10:
datalayer.dataProvider().truncate()
return False
if plist is None:
point_list = list(range(first, last + 1))
else:
first = plist[0]
last = plist[-1]
point_dict = dict.fromkeys(point_list, 'unknown')
for point in point_dict:
#Ignore points which have already been checked
if point_dict[point] != 'unknown':
continue
feature = datalayer.getFeature(point)
neighbourhood, extreme_speed, line = rangequery(feature, datalayer, median_speed, median_distance)
#Ignore very small point clusters/ possible clustered straight lines containing no extreme point speeds
if (len(neighbourhood) <= 4 or line) and not extreme_speed and len(neighbourhood) <= 10:
point_dict[point] = 'walk'
continue
point_dict[point] = 'cluster'
neighbour_dict = dict()
neighbour_direction = dict.fromkeys(list(range(1, 5)), False)
for neighbour in neighbourhood:
if not (first <= neighbour <= last):
continue
#Check break likelihood & walking direction of all points in cluster
break_chance, speed, angle, line_direction = break_likelihood(datalayer, neighbour, median_speed)
neighbour_dict[neighbour] = [break_chance, speed, angle, line_direction]
if point_dict[neighbour] == 'walk':
point_dict[neighbour] = 'cluster'
#Ignore points which have already been checked
if point_dict[neighbour] != 'unknown':
continue
point_dict[neighbour] = 'cluster'
#Find extent of cluster by checking each point
new_neighbours, extreme_speed, line = rangequery(datalayer.getFeature(neighbour), datalayer,
median_speed, median_distance)
if (len(new_neighbours) > 4 and not line) or extreme_speed or len(new_neighbours) > 10:
for new_neighbour in new_neighbours:
if new_neighbour not in neighbourhood:
neighbourhood.append(new_neighbour)
min_breakpoint = math.inf
max_breakpoint = 0
breakcount = 0
for k, v in sorted(neighbour_dict.items()):
if v[0] != 'Low':
if k < min_breakpoint:
min_breakpoint = k
if k > max_breakpoint:
max_breakpoint = k
breakcount += 1
#If no points have medium/high break likelihood, ignore cluster
if breakcount == 0 or len(neighbourhood) <= 4:
for neighbour in neighbour_dict:
point_dict[neighbour] = 'walk'
continue
breakpoints = list(range(min_breakpoint, max_breakpoint + 1))
#Check break likelihood of 'gaps' in cluster id list (ie cluster = points [1,2,5,6], check ids 3 & 4)
for item in breakpoints:
if item not in neighbourhood:
break_chance, speed, angle, line_direction = break_likelihood(datalayer, item, median_speed)
neighbour_dict[item] = [break_chance, speed, angle, line_direction]
if break_chance != 'Low':
breakcount += 1
neighbour_direction[neighbour_dict[item][3]] = True
#Check to ensure track doubles back on itself
if (neighbour_direction[1] & neighbour_direction[3]) or \
(neighbour_direction[2] & neighbour_direction[4]):
#Check less than half the points have low break likelihood
if breakcount / len(breakpoints) >= 0.5:
selectedFeats.extend(breakpoints)
point_dict.update(dict.fromkeys(breakpoints, 'cluster'))
#Update/Add OnBreak field to the data layer
field_no = datalayer.dataProvider().fieldNameIndex("OnBreak")
if field_no == -1:
newAttribute = [QgsField('OnBreak', QVariant.Int, 'Integer')]
datalayer.dataProvider().addAttributes(newAttribute)
field_no = datalayer.dataProvider().fieldNameIndex("OnBreak")
change_dict = {field_no: 1}
add_breaks = dict.fromkeys(selectedFeats, change_dict)
datalayer.dataProvider().changeAttributeValues(add_breaks)
return True | src/gps_reader_pkg/break_finder.py | 0.768907 | 0.60013 |
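The turn-angle heuristic in break_likelihood maps straight-ahead travel to values near 180 and a double-back to values near 0. A standalone illustration of the same formula:

```python
def turn_angle(a1, a2):
    # identical to break_likelihood: 180 = straight on, 0 = full reversal
    return abs(180 - abs(a1 - a2))

print(turn_angle(90.0, 92.0))   # 178.0 -> 'Wide' angle, keep walking
print(turn_angle(90.0, 265.0))  # 5.0   -> 'Narrow' angle, doubling back
```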
import numpy as np
from .base import RNNBase
from .linear import LinearLayer
from .activation import Tanh, Softmax, Sigmoid
class RNNCell(RNNBase):
"""
Recurrent neural network cell implementation.
"""
def __init__(self, input_dim, hidden_dim):
"""
Initialize the parameters with the input and hidden
dimensions.
Parameters
----------
input_dim : int
Dimension of the input.
hidden_dim : int
Number of units in the RNN cell.
"""
super(RNNCell, self).__init__(input_dim, hidden_dim)
self.lineal = LinearLayer(input_dim + hidden_dim, hidden_dim)
self.tanh = Tanh()
def forward(self, input_X, hidden=None):
"""
Computes the forward propagation of the RNN.
Parameters
----------
input_X : numpy.array or list
List containing all the inputs that will be used to
propagate along the RNN cell.
Returns
-------
hidden : numpy.array
Array containing the output of the hidden state.
"""
if hidden is None: hidden = np.zeros((self.hidden_dim, 1))
# combine the input with the hidden state
self.combined = np.concatenate((input_X, hidden), axis=0)
input_hidden = self.lineal.forward(self.combined)
# hidden state
hidden = self.tanh.forward(input_hidden)
return hidden
def backward(self, dZ, d_hidden=0, hidden=None, combined=None):
"""
Computes the backward propagation of the model.
Parameters
----------
dZ : numpy.array
The gradient of the output with respect to the
next layer.
Returns
-------
d_hidden : numpy.array
The gradient of the input with respect to the current
layer.
"""
# derivative of the hyperbolic tangent
self.tanh.A = hidden
d_tanh = self.tanh.backward(dZ)
# derivative of the hidden state
parameters = self._retrieve_parameters(self.lineal, d_hidden)
self.lineal.A = combined
d_hidden = self.lineal.backward(d_tanh)
self.lineal = self._update_parameters(self.lineal, parameters)
return d_hidden[-self.hidden_dim:, :]
def optimize(self, method):
"""
Updates the parameters of the model using a given optimization
method.
Parameters
----------
method: Class
Method to use in order to optimize the parameters.
"""
self.lineal.optimize(method)
class RNN(RNNBase):
"""
Implementation of a Recurrent neural network.
"""
def __init__(self, input_dim, output_dim, hidden_dim):
"""
Initialize the parameters with the input, output and hidden
dimensions.
Parameters
----------
input_dim : int
Dimension of the input.
output_dim : int
Dimension of the output.
hidden_dim : int
Number of units in the RNN cell.
"""
super(RNN, self).__init__(input_dim, hidden_dim)
self.output_dim = output_dim
self.type = 'Recurrent'
self.rnn_cell = RNNCell(input_dim, hidden_dim)
self.lineal = LinearLayer(hidden_dim, output_dim)
self.softmax = Softmax()
def forward(self, input_X, hidden=None):
"""
Computes the forward propagation of the RNN.
Parameters
----------
input_X : numpy.array or list
List containing all the inputs that will be used to
propagate along the RNN cell.
Returns
-------
y_preds : list
List containing all the predictions for each input of the
input_X list.
"""
self.input_X = input_X
if hidden is None: hidden = np.zeros((self.hidden_dim, 1))
self.hiddens = [hidden]
self.combines = []
outputs = []
for input in input_X:
# compute the output
hidden = self.rnn_cell.forward(input, hidden)
input_softmax = self.lineal.forward(hidden)
output = self.softmax.forward(input_softmax)
# save the outputs for the backward prop
outputs.append(output.tolist())
self.hiddens.append(hidden)
self.combines.append(self.rnn_cell.combined)
return np.array(outputs)
def backward(self, dZ, d_hidden=0):
"""
Computes the backward propagation of the model.
Parameters
----------
dZ : numpy.array
The gradient of the output with respect to the
next layer.
Returns
-------
d_hidden : numpy.array
The gradient of the input with respect to the current
layer.
"""
reverse = zip(
reversed(dZ), reversed(self.hiddens), reversed(self.combines)
)
for dz, hidden, combined in reverse:
# derivative of the output
parameters = self._retrieve_parameters(self.lineal, d_hidden)
self.lineal.A = hidden
d_output = self.lineal.backward(dz) + d_hidden
self.lineal = self._update_parameters(self.lineal, parameters)
# derivative of the hidden state
d_hidden = self.rnn_cell.backward(
d_output, d_hidden, hidden, combined
)
return d_hidden
def optimize(self, method):
"""
Updates the parameters of the model using a given optimization
method.
Parameters
----------
method: Class
Method to use in order to optimize the parameters.
"""
self.lineal.optimize(method)
self.rnn_cell.optimize(method)
class LSTMCell(RNNBase):
"""
LSTM (long short-term memory) cell implementation.
"""
def __init__(self, input_dim, hidden_dim):
"""
Initialize the parameters with the input and hidden
dimensions.
Parameters
----------
input_dim : int
Dimension of the input.
hidden_dim : int
Number of units in the RNN cell.
"""
super(LSTMCell, self).__init__(input_dim, hidden_dim)
self.lineal_f = LinearLayer(input_dim + hidden_dim, hidden_dim)
self.sigmoid_f = Sigmoid()
self.lineal_i = LinearLayer(input_dim + hidden_dim, hidden_dim)
self.sigmoid_i = Sigmoid()
self.lineal_tanh = LinearLayer(input_dim + hidden_dim, hidden_dim)
self.tanh_i = Tanh()
self.lineal_o = LinearLayer(input_dim + hidden_dim, hidden_dim)
self.sigmoid_o = Sigmoid()
self.tanh_c = Tanh()
def forward(self, input_X, hidden=None, cell_state=None):
"""
Computes the forward propagation of the RNN.
Parameters
----------
input_X : numpy.array or list
List containing all the inputs that will be used to
propagate along the RNN cell.
Returns
-------
hidden : numpy.array
Array containing the output of the hidden state.
"""
if hidden is None: hidden = np.zeros((self.hidden_dim, 1))
if cell_state is None: cell_state = np.zeros((self.hidden_dim, 1))
# combine the input with the hidden state
self.combined = np.concatenate((input_X, hidden), axis=0)
forget_input = self.lineal_f.forward(self.combined)
forget_layer = self.sigmoid_f.forward(forget_input)
input_input = self.lineal_i.forward(self.combined)
input_layer = self.sigmoid_i.forward(input_input)
tanh_input = self.lineal_tanh.forward(self.combined)
tanh_layer = self.tanh_i.forward(tanh_input)
output_input = self.lineal_o.forward(self.combined)
output_layer = self.sigmoid_o.forward(output_input)
cell_state = forget_layer * cell_state + input_layer * tanh_layer
_tanh_c = self.tanh_c.forward(cell_state)
hidden = output_layer * _tanh_c
return hidden, cell_state
def backward(self, dZ, d_hidden=0, hidden=None, combined=None):
"""
Computes the backward propagation of the model.
Parameters
----------
dZ : numpy.array
The gradient of the output with respect to the
next layer.
Returns
-------
d_hidden : numpy.array
The gradient of the input with respect to the current
layer.
"""
# derivative of the hyperbolic tangent applied to the cell state
self.tanh_c.A = hidden
d_tanh_c = self.tanh_c.backward(dZ)
# simplified backward pass: only the output-gate path is propagated;
# gradients through the forget and input gates are omitted here
parameters = self._retrieve_parameters(self.lineal_o, d_hidden)
self.lineal_o.A = combined
d_hidden = self.lineal_o.backward(d_tanh_c)
self.lineal_o = self._update_parameters(self.lineal_o, parameters)
return d_hidden[-self.hidden_dim:, :]
def optimize(self, method):
"""
Updates the parameters of the model using a given optimization
method.
Parameters
----------
method: Class
Method to use in order to optimize the parameters.
"""
self.lineal_f.optimize(method)
self.lineal_i.optimize(method)
self.lineal_tanh.optimize(method)
self.lineal_o.optimize(method) | src/dnetworks/layers/recurrent.py | 0.913732 | 0.643441 |
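A toy forward pass through the RNN above; a sketch assuming LinearLayer initializes its own weights and that inputs are (input_dim, 1) column vectors, matching the concatenation in RNNCell.forward:

```python
import numpy as np

rnn = RNN(input_dim=4, output_dim=4, hidden_dim=8)
xs = [np.eye(4)[:, [t]] for t in (0, 2, 1)]  # three one-hot column vectors
probs = rnn.forward(xs)
print(probs.shape)  # (3, 4, 1): one softmax distribution per time step
```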
import numpy as np
import dolfin
from dolfin import *
from mpi4py import MPI as pyMPI
from leopart import StokesStaticCondensation, FormsStokes
import geopart.stokes.incompressible
import dolfin_dg as dg
comm = pyMPI.COMM_WORLD
mpi_comm = MPI.comm_world
#load mesh,boundaries and coefficients from file
mark = {"Internal":0, "wall": 1,"inlet": 2,"outlet": 3 }
#read mesh and boundaries from file
mesh = Mesh()
hdf = HDF5File(mesh.mpi_comm(), "mesh_boundaries.h5", "r")
hdf.read(mesh, "/mesh", False)
boundaries = MeshFunction('size_t', mesh, mesh.topology().dim() - 1)
hdf.read(boundaries, "/boundaries")
hdf.close()
#read viscosity coefficient from file
mu_ele = FunctionSpace(mesh, "DG", 0)
mu = Function(mu_ele)
hdf = HDF5File(mesh.mpi_comm(), "mesh_coeffs.h5", "r")
hdf.read(mu, "/mu")
hdf.close()
#output viscosity to paraview
XDMFFile(mpi_comm, "coeff_preview.xdmf").write_checkpoint(mu, "coeffs", 0)
#Define HDG element and function space
element_cls = geopart.stokes.incompressible.HDG2()
W = element_cls.function_space(mesh)
ds = dolfin.Measure('ds',domain=mesh,subdomain_data=boundaries)
n = dolfin.FacetNormal(mesh)
#Define boundary condition
U = element_cls.create_solution_variable(W)
p_in = dolfin.Constant(1.0) # pressure inlet
p_out = dolfin.Constant(0.0) # pressure outlet
noslip = dolfin.Constant([0.0]*mesh.geometry().dim()) # no-slip wall
# Boundary conditions
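# The pressure drop is imposed weakly as Neumann (traction) conditions
# g = -p*I*n on the inlet and outlet, while the walls get a no-slip
# Dirichlet condition.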
gN1 = (-p_out * dolfin.Identity(mesh.geometry().dim())) * n
Neumann_outlet = dg.DGNeumannBC(ds(mark["outlet"]), gN1)
gN2 = (-p_in * dolfin.Identity(mesh.geometry().dim())) * n
Neumann_inlet = dg.DGNeumannBC(ds(mark["inlet"]), gN2)
Dirichlet_wall = dg.DGDirichletBC(ds(mark["wall"]), noslip)
weak_bcs = [Dirichlet_wall, Neumann_inlet, Neumann_outlet]
# Body force term
f = dolfin.Constant([0.0] * mesh.geometry().dim())
model = geopart.stokes.StokesModel(eta=mu, f=f)
# Form and solve the Stokes system
A, b = dolfin.PETScMatrix(), dolfin.PETScVector()
element_cls.solve_stokes(W, U, (A, b), weak_bcs, model)
uh, ph = element_cls.get_velocity(U), element_cls.get_pressure(U)
# Output solution (p, u) to ParaView
dolfin.XDMFFile("pressure.xdmf").write_checkpoint(ph, "p")
dolfin.XDMFFile("velocity.xdmf").write_checkpoint(uh, "u")
flux = [dolfin.assemble(dolfin.dot(uh, n)*ds(i)) for i in range(len(mark))]
if comm.Get_rank() == 0:
for key, value in mark.items():
print("Flux_%s= %.15lf"%(key,flux[value])) | hdg_test/3d/hdg_test.py | import numpy as np
import dolfin
from dolfin import *
from mpi4py import MPI as pyMPI
from leopart import StokesStaticCondensation, FormsStokes
import geopart.stokes.incompressible
import dolfin_dg as dg
comm = pyMPI.COMM_WORLD
mpi_comm = MPI.comm_world
#load mesh,boundaries and coefficients from file
mark = {"Internal":0, "wall": 1,"inlet": 2,"outlet": 3 }
#read mesh and boundaries from file
mesh = Mesh()
hdf = HDF5File(mesh.mpi_comm(), "mesh_boundaries.h5", "r")
hdf.read(mesh, "/mesh", False)
boundaries = MeshFunction('size_t', mesh, mesh.topology().dim() - 1)
hdf.read(boundaries, "/boundaries")
hdf.close()
#read viscosity coefficient from file
mu_ele = FunctionSpace(mesh, "DG", 0)
mu = Function(mu_ele)
hdf = HDF5File(mesh.mpi_comm(), "mesh_coeffs.h5", "r")
hdf.read(mu, "/mu")
hdf.close()
#output viscosity to paraview
XDMFFile(mpi_comm, "coeff_preview.xdmf").write_checkpoint(mu, "coeffs", 0)
#Define HDG element and function space
element_cls = geopart.stokes.incompressible.HDG2()
W = element_cls.function_space(mesh)
ds = dolfin.Measure('ds',domain=mesh,subdomain_data=boundaries)
n = dolfin.FacetNormal(mesh)
#Define boundary condition
U = element_cls.create_solution_variable(W)
p_in = dolfin.Constant(1.0) # pressure inlet
p_out = dolfin.Constant(0.0) # pressure outlet
noslip = dolfin.Constant([0.0]*mesh.geometry().dim()) # no-slip wall
#Boundary conditions
gN1 = (- p_out*dolfin.Identity(mesh.geometry().dim())) * n
Neumann_outlet=dg.DGNeumannBC(ds(mark["outlet"]), gN1)
gN2 = (- p_in*dolfin.Identity(mesh.geometry().dim())) * n
Neumann_inlet=dg.DGNeumannBC(ds(mark["inlet"]), gN2)
Dirichlet_wall=dg.DGDirichletBC(ds(mark["wall"]), noslip)
weak_bcs = [Dirichlet_wall,Neumann_inlet,Neumann_outlet]
#Body force term
f = dolfin.Constant([0.0]*mesh.geometry().dim())
model=geopart.stokes.StokesModel(eta=mu,f=f)
#Form and solve Stokes
A, b = dolfin.PETScMatrix(), dolfin.PETScVector()
element_cls.solve_stokes(W, U, (A, b), weak_bcs, model)
uh, ph = element_cls.get_velocity(U), element_cls.get_pressure(U)
#Output solution p,u to paraview
dolfin.XDMFFile("pressure.xdmf").write_checkpoint(ph, "p")
dolfin.XDMFFile("velocity.xdmf").write_checkpoint(uh, "u")
flux = [dolfin.assemble(dolfin.dot(uh, n)*ds(i)) for i in range(len(mark))]
if comm.Get_rank() == 0:
for key, value in mark.items():
print("Flux_%s= %.15lf"%(key,flux[value])) | 0.278159 | 0.317916 |
import dask.array
import h5py
import logging
import math
import numpy as np
import os
import pickle
from pytorch_pretrained_bert import BertTokenizer, BertModel
import random
import subprocess
import torch
import urllib
nonbreaking_url = (
'https://raw.githubusercontent.com/moses-smt/mosesdecoder'
'/ef028446f3640e007215b4576a4dc52a9c9de6db/scripts/share'
'/nonbreaking_prefixes/nonbreaking_prefix')
normalize_punct_url = (
'https://raw.githubusercontent.com/moses-smt/mosesdecoder'
'/ef028446f3640e007215b4576a4dc52a9c9de6db/scripts/tokenizer'
'/normalize-punctuation.perl')
remove_nonprint_url = (
'https://raw.githubusercontent.com/moses-smt/mosesdecoder'
'/ef028446f3640e007215b4576a4dc52a9c9de6db/scripts/tokenizer'
'/remove-non-printing-char.perl')
tokenizer_url = (
'https://raw.githubusercontent.com/moses-smt/mosesdecoder'
'/ef028446f3640e007215b4576a4dc52a9c9de6db/scripts/tokenizer'
'/tokenizer.perl')
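# All Moses script URLs are pinned to a fixed mosesdecoder commit so that
# the preprocessing stays reproducible across runs.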
logger = logging.getLogger('fr2en')
class LanguageCorpus:
"""
This is the most basic corpus and base class for other corpora.
It uses a perl script from Moses to tokenize and `subword-nmt` to form
BPE vocabulary. These are standard tools for preprocessing, see e.g.
https://github.com/pytorch/fairseq/blob/master/examples/translation/prepare-wmt14en2de.sh # noqa: E501
It outputs sequences of integers indexing into the vocabulary.
Moses is available at https://github.com/moses-smt/mosesdecoder.
"""
data_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'data')
moses_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'data', 'moses')
def __init__(self,
name,
shuffle=True,
max_length=200):
"""`max_length` is the maximum length of a sentence in BPE tokens."""
self.name = name
self.shuffle = shuffle
self.max_length = max_length
os.makedirs(os.path.join(self.moses_dir, 'tokenizer'), exist_ok=True)
def _clean(self, datafiles, max_size=None, use_cache=False):
"""
Downloads Moses perl scripts if necessary, and uses them to normalize
punctuation and remove non-printable characters.
"""
        # Download the Moses cleaning scripts if not already present.
normpunct_fn = normalize_punct_url.split('/')[-1]
normpunct_path = os.path.join(self.moses_dir, 'tokenizer', normpunct_fn)
remnon_fn = remove_nonprint_url.split('/')[-1]
remnon_path = os.path.join(self.moses_dir, 'tokenizer', remnon_fn)
if not os.path.isfile(normpunct_path):
urllib.request.urlretrieve(
normalize_punct_url, filename=normpunct_path)
if not os.path.isfile(remnon_path):
urllib.request.urlretrieve(
remove_nonprint_url, filename=remnon_path)
# Prepare an output directory.
out_path = os.path.join(self.data_dir, self.name, 'cleaned')
os.makedirs(os.path.join(self.data_dir, self.name), exist_ok=True)
# Concatenate datasets for each language.
langs = set()
for dataset in datafiles:
for lang in dataset:
langs.add(lang)
if not use_cache or not os.path.isfile(f'{out_path}.{lang}'):
os.system(f'cat {dataset[lang]} >> tmp.{lang}')
# Clean datasets for each language.
for lang in langs:
if not use_cache or not os.path.isfile(f'{out_path}.{lang}'):
logger.info(f'Cleaning {lang} combined dataset.')
max_size = 100000000000 if max_size is None else max_size
os.system(f'head -n {max_size} tmp.{lang} '
f'| perl {normpunct_path} {lang} '
f'| perl {remnon_path} > {out_path}.{lang}')
os.system(f'rm -rf tmp.{lang}')
else:
logger.info(
f'Using previously cleaned dataset {out_path}.{lang}.')
return out_path, list(langs)
def _tokenize(self, data_path, langs, use_cache=False):
"""Tokenizes into BPE tokens using a perl script from Moses."""
tokenizer_fn = tokenizer_url.split('/')[-1]
tokenizer_path = os.path.join(self.moses_dir, 'tokenizer', tokenizer_fn)
if not os.path.isfile(tokenizer_path):
urllib.request.urlretrieve(tokenizer_url, filename=tokenizer_path)
nonbreaking_dir = \
os.path.join(self.moses_dir, 'share', 'nonbreaking_prefixes')
os.makedirs(nonbreaking_dir, exist_ok=True)
nonbreaking_fn = nonbreaking_url.split('/')[-1]
nonbreaking_path = os.path.join(nonbreaking_dir, nonbreaking_fn)
for lang in langs:
if not os.path.isfile(f'{nonbreaking_path}.{lang}'):
urllib.request.urlretrieve(
f'{nonbreaking_url}.{lang}',
filename=f'{nonbreaking_path}.{lang}')
tok_path = os.path.join(self.data_dir, self.name, 'tokens')
for lang in langs:
if not use_cache or not os.path.isfile(f'{tok_path}.{lang}'):
logger.info(f'Tokenizing dataset {data_path}.{lang}.')
os.system(
f'cat {data_path}.{lang} '
f'| perl {tokenizer_path} -threads 8 -a -l {lang} '
f'> {tok_path}.{lang}')
else:
logger.info(
f'Using previously tokenized dataset {data_path}.{lang}')
return tok_path
def _filter_sents(self, tok_path, langs, use_cache=False):
logging.info('Filtering out sentence pairs with invalid lengths.')
# Filter out sentence pairs with invalid lengths.
if (not use_cache
or not os.path.isfile(f'{tok_path}.filtered.{langs[0]}')
or not os.path.isfile(f'{tok_path}.filtered.{langs[1]}')):
with open(f'{tok_path}.{langs[0]}', 'r') as f, \
open(f'{tok_path}.{langs[1]}', 'r') as g, \
open(f'{tok_path}.filtered.{langs[0]}', 'w') as f_out, \
open(f'{tok_path}.filtered.{langs[1]}', 'w') as g_out:
line1 = f.readline()
line2 = g.readline()
while line1 and line2:
l1 = len(line1.split())
l2 = len(line2.split())
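                    # Keep a pair only if neither side is empty and the
                    # lengths are within 1.5x of each other (or both
                    # sentences are short).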
if ((not (l1 > 1.5 * l2 or l2 > 1.5 * l1)
or (l1 <= 10 and l2 <= 10)) and l1 > 0 and l2 > 0):
# readline() keeps the newline, write() does not add one
f_out.write(line1)
g_out.write(line2)
line1 = f.readline()
line2 = g.readline()
def _encode(self, tok_path, langs, joint_vocab_size, use_cache=False):
"""
Tokenizes sentences using `subword-nmt` and converts them to sequences
of integers.
"""
# Learn joint BPE.
vocab_path = os.path.join(self.data_dir, self.name, 'vocab')
freqs_path = os.path.join(self.data_dir, self.name, 'freqs')
codes_path = os.path.join(self.data_dir, self.name, 'bpe_codes')
bpe_path = os.path.join(self.data_dir, self.name, 'int_toks')
if (not use_cache or not os.path.isfile(f'{freqs_path}.{langs[0]}')
or not os.path.isfile(codes_path)):
logging.info('Learning joint BPE.')
learn_cmd = (
'subword-nmt learn-joint-bpe-and-vocab '
f'--input {tok_path}.{langs[0]} {tok_path}.{langs[1]} '
f'-s {joint_vocab_size // 2} -o {codes_path} '
f'--write-vocabulary '
f'{freqs_path}.{langs[0]} {freqs_path}.{langs[1]}')
os.system(learn_cmd)
else:
logging.info('Using previously learned joint BPE.')
logging.info(f'Preparing joint vocabulary of size at most '
f'{joint_vocab_size + 4}.')
self._filter_sents(tok_path, langs, use_cache)
# Add special tokens to frequencies (word plus num of occurrences).
freqs = ['[PAD] 1000', '[UNK] 1000', '[CLS] 1000', '[SEP] 1000']
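        # Merge the two per-language frequency files rank by rank, skipping
        # duplicates, so both languages are well represented near the top of
        # the joint vocabulary.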
with open(f'{freqs_path}.{langs[0]}', 'r') as f_freqs, \
open(f'{freqs_path}.{langs[1]}', 'r') as g_freqs:
line1 = f_freqs.readline()
line2 = g_freqs.readline()
seen = set()
while line1 and line2:
f1 = line1.split()
f2 = line2.split()
while len(f1) < 2 or f1[0] in seen:
line1 = f_freqs.readline()
f1 = line1.split()
seen.add(f1[0])
while len(f2) < 2 or f2[0] in seen:
line2 = g_freqs.readline()
f2 = line2.split()
seen.add(f2[0])
freqs.append(line1.strip())
freqs.append(line2.strip())
line1 = f_freqs.readline()
line2 = g_freqs.readline()
freqs = freqs[:joint_vocab_size + 4]
with open(f'{freqs_path}.txt', 'w') as f_freqs:
f_freqs.write('\n'.join(freqs))
wtoi = {
word.split()[0]: idx for idx, word in enumerate(freqs)
}
        # Save vocabulary.
        with open(f'{vocab_path}.txt', 'w') as f_vocab:
            f_vocab.write('\n'.join(word.split()[0] for word in freqs))
bpe_toks = {}
for lang in langs:
# Apply the BPE codes.
if not use_cache or not os.path.isfile(f'{bpe_path}.{lang}'):
logging.info(f'Applying BPE for language {lang}.')
with open(f'{tok_path}.filtered.{lang}', 'r') as f_in:
apply_cmd = [
'subword-nmt', 'apply-bpe', '-c', codes_path,
'--vocabulary', f'{freqs_path}.txt',
]
bpe_sents = subprocess.check_output(
apply_cmd, stdin=f_in).decode('utf-8').split('\n')
bpe_toks[lang] = [
([wtoi['[CLS]']] + [wtoi[word]
if word in wtoi else wtoi['[UNK]']
for word in sent.split()]
+ [wtoi['[SEP]']]
+ [wtoi['[PAD]']] * (
self.max_length - len(sent.split()) - 1)
)[:self.max_length + 1]
for sent in bpe_sents if sent.split()
]
with open(f'{bpe_path}.{lang}', 'wb') as f_bpe:
pickle.dump(bpe_toks[lang], f_bpe)
else:
logging.info(f'Using previously calculated BPE tokenization '
f'for {lang}.')
with open(f'{bpe_path}.{lang}', 'rb') as f_bpe:
bpe_toks[lang] = pickle.load(f_bpe)
return bpe_toks
def _save(self, data, valid_size, dtype='int32'):
"""Saves the datasets to HDF5 files."""
h5path = os.path.join(self.data_dir, self.name)
for lang in data:
h5file = f'{h5path}/{lang}.h5'
logging.info(f'Saving {lang} dataset to {h5file}')
with h5py.File(h5file, 'w') as f:
train_ds = f.create_dataset(
'train', data=data[lang][:-valid_size], dtype=np.int32)
train_ds.attrs['dtype'] = dtype
valid_ds = f.create_dataset(
'valid', data=data[lang][-valid_size:], dtype=np.int32)
valid_ds.attrs['dtype'] = dtype
return [f'{h5path}/{lang}.h5' for lang in data]
def _shuffle(self, toks):
"""Shuffles the sentences in `toks`."""
logging.info('Shuffling datasets.')
new_toks = {}
toks_list = list(zip(*[toks[lang] for lang in toks]))
random.shuffle(toks_list)
d = list(zip(*toks_list))
for i, lang in enumerate(toks):
new_toks[lang] = d[i]
return new_toks
def create(self, datafiles, joint_vocab_size, max_size=None, valid_size=0,
use_cache=False):
"""Creates train and validation datasets from files `datafiles`."""
out_path, langs = self._clean(datafiles, max_size, use_cache)
tok_path = self._tokenize(out_path, langs, use_cache)
bpe_toks = self._encode(tok_path, langs, joint_vocab_size, use_cache)
if self.shuffle:
bpe_toks = self._shuffle(bpe_toks)
return self._save(bpe_toks, valid_size, dtype='int32')
class BertCorpus(LanguageCorpus):
"""
This is a `LanguageCorpus` which uses BERT's multilingual BPE vocabulary
to tokenize.
BERT's multilingual vocabulary supports 100 languages in one, so it has
approximately 114,000 tokens.
"""
def __init__(self,
name,
shuffle=True,
max_length=200):
super().__init__(name, shuffle, max_length)
# These are tokens '[CLS]', '[SEP]', '[PAD]'
self.bos, self.eos, self.pad = 101, 102, 0
self.emb_size = 768
def _encode(self, raw_text_path, langs, use_cache=False):
"""
Encodes sentences listed one per line in file `raw_text_path` as seqs
of integers indexing into the BERT multilingual vocabulary.
"""
self._filter_sents(raw_text_path, langs, use_cache)
# Load saved tokenized data if we cached it during a previous run.
int_tok_path = os.path.join(self.data_dir, self.name, f'int_tok.pickle')
if use_cache and os.path.isfile(int_tok_path):
logging.info(f'Loading BPE tokenized data from {int_tok_path}.')
try:
with open(int_tok_path, 'rb') as f:
return pickle.load(f)
except Exception as e:
logging.warning(
f'Loading cached BPE tokenized int data failed: {str(e)}.')
# Load Bert tokenizer.
logging.info(f'Encoding data as BPE token indices.')
# WARNING: If you change the tokenizer, then make sure the above
# hard-coded bos, eos and pad token indices are correct.
tokenizer = BertTokenizer.from_pretrained(
'bert-base-multilingual-cased', do_lower_case=False)
# Tokenize the sentences in the given files.
lengths = {}
ts = {}
for lang in langs:
with open(f'{raw_text_path}.filtered.{lang}', 'r') as f:
logging.info(f'Converting {lang} text to BPE token indices.')
ts[lang] = [
tokenizer.convert_tokens_to_ids(
tokenizer.tokenize(sent))[:self.max_length]
for sent in f
]
lengths[lang] = [len(sent) for sent in ts[lang]]
# Vectors will have length `max_len + 1` to account for BOS.
max_len = max([ll for lang in langs for ll in lengths[lang]])
toks = {}
for lang in langs:
logging.info(f'Adding BOS, EOS and PAD tokens for {lang}.')
toks[lang] = [
([self.bos] + sent + [self.eos]
+ [self.pad] * (max_len - len(sent) - 1))[:max_len + 1]
for sent in ts[lang]
]
# Save vocabulary to file. (It will be called `vocab.txt`.)
vocab_dir = os.path.join(self.data_dir, self.name)
tokenizer.save_vocabulary(vocab_dir)
# Save BPE tokenized data so we do not have to recompute if we rerun.
with open(int_tok_path, 'wb') as f:
logging.info(f'Saving BPE tokenized data to {int_tok_path}.')
pickle.dump((toks, lengths), f)
return toks, lengths
def _save_with_lens(self, data, lens, valid_size, dtype='int32'):
"""
Saves the datasets to one HDF5 file per language together with
the list of the sentence lengths.
This separates `valid_size` sentences from the end of the training
dataset to form the validation set.
"""
h5path = os.path.join(self.data_dir, self.name)
for lang in data:
with h5py.File(f'{h5path}/{lang}.h5', 'w') as f:
train_ds = f.create_dataset(
'train', data=data[lang][:-valid_size])
train_ds.attrs['dtype'] = dtype
train_lens_ds = f.create_dataset(
'train_lens', data=lens[lang][:-valid_size])
train_lens_ds.attrs['dtype'] = dtype
valid_ds = f.create_dataset(
'valid', data=data[lang][-valid_size:])
valid_ds.attrs['dtype'] = dtype
valid_lens_ds = f.create_dataset(
'valid_lens', data=lens[lang][-valid_size:])
valid_lens_ds.attrs['dtype'] = dtype
return [f'{h5path}/{lang}.h5' for lang in data]
def _shuffle_with_lens(self, toks, lens):
"""Shuffles datasets which have associated sentence length lists."""
logging.info('Shuffling datasets.')
new_toks, new_lens = {}, {}
toks_lens = (
[toks[lang] for lang in toks] + [lens[lang] for lang in lens])
toks_lens = list(zip(*toks_lens))
random.shuffle(toks_lens)
d = list(zip(*toks_lens))
for i, lang in enumerate(toks):
new_toks[lang] = d[i]
new_lens[lang] = d[i + len(toks)]
return new_toks, new_lens
def create(self, datafiles, max_size=None, valid_size=0, use_cache=False):
"""Creates train and validation datasets from files `datafiles`."""
out_path, langs = self._clean(datafiles, max_size, use_cache)
        toks, lens = self._encode(out_path, langs, use_cache)
if self.shuffle:
toks, lens = self._shuffle_with_lens(toks, lens)
return self._save_with_lens(toks, lens, valid_size, dtype='int32')
class DropNthTokenCorpus(BertCorpus):
"""
This is a corpus where every nth word has been dropped. The BOS token
and the first token of the sentence are never dropped. The remaining
    non-padding tokens are always terminated by an EOS token.
    This keeps `n` versions of each sentence, with the token dropping
    starting at a different offset in each.
"""
def __init__(self,
name,
n,
shuffle=True,
max_length=200):
super().__init__(name, shuffle, max_length)
self.n = n
def _subsample(self, toks, lens):
"""
Discards every nth token from `toks`.
"""
logging.info(f'Discarding every {self.n}th token.')
max_len = min([len(toks[lang][0]) for lang in toks])
new_max_len = (max_len - max_len // self.n) + 1
new_toks = {lang: [] for lang in toks}
new_lens = {lang: [] for lang in lens}
for lang in toks:
for sent, ll in zip(toks[lang], lens[lang]):
for k in range(self.n):
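                    # Offset k selects which positions (mod n, BOS excluded)
                    # are dropped; tokens just past the shortened sentence
                    # end are overwritten with EOS below.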
                    new_sent = [
                        self.eos if ll + 1 <= i <= ll + 2 else w
                        for i, w in enumerate(sent)
                        if ((i - 1) % self.n != k or i == 1)
                    ]
new_sent = \
new_sent + [self.pad] * (new_max_len - len(new_sent))
new_toks[lang].append(new_sent)
new_lens[lang].append(
ll - (ll + self.n - k - 1) // self.n + int(k == 0))
return new_toks, new_lens
def create(self, datafiles, max_size=None, valid_size=0, use_cache=False):
"""
Create the dataset from `datafiles` by dropping every nth token.
"""
out_path, langs = self._clean(datafiles, max_size, use_cache)
        toks, lens = self._encode(out_path, langs, use_cache)
        if self.shuffle:
            toks, lens = self._shuffle_with_lens(toks, lens)
toks, lens = self._subsample(toks, lens)
return self._save_with_lens(toks, lens, valid_size, dtype='int32')
class KeepRandomPercentCorpus(BertCorpus):
"""
    This is a corpus which keeps each sentence from `toks` starting with
    BOS and the first token of the sentence, with `p` percent of the
    remaining tokens randomly kept. The rest of the tokens are discarded.
The indices of discarded tokens agree across languages.
"""
def __init__(self,
name,
p,
shuffle=True,
max_length=200):
super().__init__(name, shuffle, max_length)
self.p = p
def _subsample(self, toks, lens):
"""
Keep `self.p` percent tokens from every sentence. Removed tokens
can be padding as well as part of the sentence.
"""
logging.info(f'Keeping random set of {self.p * 100}% of tokens.')
max_len = min([len(toks[lang][0]) for lang in toks])
n = math.ceil(max_len * self.p)
new_toks = {lang: [] for lang in toks}
new_lens = {lang: [] for lang in lens}
lang1, lang2 = tuple(new_toks.keys())
for sent1, l1, sent2, l2 in zip(toks[lang1], lens[lang1],
toks[lang2], lens[lang2]):
indices = list(range(2, max_len)) # Never drop BOS or first token.
random.shuffle(indices)
indices = indices[:n]
indices.sort()
new_sent1 = sent1[:2] + [sent1[i] for i in indices]
new_sent2 = sent2[:2] + [sent2[i] for i in indices]
            # Record the new length, adding back the EOS token if it was
            # dropped; the for-else handles sentences with no EOS or PAD left.
            for i, c in enumerate(new_sent1):
                if c == self.eos:
                    new_lens[lang1].append(i - 1)
                    break
                elif c == self.pad:
                    new_sent1[i] = self.eos
                    new_lens[lang1].append(i - 1)
                    break
            else:
                new_lens[lang1].append(len(new_sent1) - 1)
            for i, c in enumerate(new_sent2):
                if c == self.eos:
                    new_lens[lang2].append(i - 1)
                    break
                elif c == self.pad:
                    new_sent2[i] = self.eos
                    new_lens[lang2].append(i - 1)
                    break
            else:
                new_lens[lang2].append(len(new_sent2) - 1)
new_toks[lang1].append(new_sent1)
new_toks[lang2].append(new_sent2)
return new_toks, new_lens
def create(self, datafiles, max_size=None, valid_size=0, use_cache=False):
"""
Create the dataset from `datafiles` by keeping `p` percent of
the input/output tokens.
"""
out_path, langs = self._clean(datafiles, max_size, use_cache)
        toks, lens = self._encode(out_path, langs, use_cache)
        if self.shuffle:
            toks, lens = self._shuffle_with_lens(toks, lens)
toks, lens = self._subsample(toks, lens)
return self._save_with_lens(toks, lens, valid_size, dtype='int32')
class WindowedCorpus(BertCorpus):
"""
This is a corpus formed by selecting a window of tokens of length
`window_size` from another corpus.
The window is applied at two positions.
1. At the beginning of the sentence. This ensures the model learns how
to begin a sentence.
2. Starting at the middle of the sentence. This requires knowledge of the
length of each sentence.
"""
def __init__(self,
name,
shuffle=True,
max_length=200):
super().__init__(name, shuffle, max_length)
def _window(self, toks, lens, window_size):
"""
Selects two windows of size `window_size` from each sentence in
`toks` that has length given in `lens`.
"""
# lens do not include BOS or EOS tokens.
new_toks = {}
new_lens = {}
for lang in toks:
new_toks[lang] = []
new_lens[lang] = []
for i, sent in enumerate(toks[lang]):
n = lens[lang][i]
new_toks[lang].append(sent[:window_size])
new_lens[lang].append(min(n, window_size - 1))
new_toks[lang].append(sent[n // 2:n // 2 + window_size])
new_lens[lang].append(min(n - n // 2, n // 2 + window_size))
return new_toks, new_lens
def create(self, datafiles, max_size=None, window_size=25, valid_size=0,
use_cache=False):
"""
        Create a dataset from `datafiles` by selecting two windows of
        `window_size` tokens from every sentence: one at the start and
        one starting at the middle.
"""
out_path, langs = self._clean(datafiles, max_size, use_cache)
        toks, lens = self._encode(out_path, langs, use_cache)
if self.shuffle:
toks, lens = self._shuffle_with_lens(toks, lens)
        toks, lens = self._window(toks, lens, window_size)
return self._save_with_lens(toks, lens, valid_size, dtype='int32')
class EmbeddingCorpus(BertCorpus):
"""
This class represents a corpus composed of embedding vectors. Presumably
any model training on it would use MSE as the loss function.
"""
def __init__(self,
name,
shuffle=True,
max_length=200):
super().__init__(name, shuffle, max_length)
self.bos_emb, self.eos_emb, self.pad_emb = None, None, None
def _embed(self, toks):
"""
        Converts the lists of integers in `toks` to embedding vectors
        using BERT's multilingual cased model.
"""
def apply_emb(x):
"""
This function applies the BERT embedding layer to `x`. It is
called by the mapping function. It must be a sub-function so
it has access to `bert_emb`.
`dask.array.map_blocks()` requires the mapping function to
always return an array with the same shape as the calling array's
`chunksize`.
"""
emb = np.array(bert_emb(torch.LongTensor(x)).data, dtype=np.float32)
if x.shape[0] < chunk_size:
# This is a technical step to prevent returning too few rows.
dims = (chunk_size - x.shape[0], max_length, self.emb_size)
return np.concatenate((emb, np.zeros(dims, dtype=np.float32)))
return emb
bert_model = BertModel.from_pretrained('bert-base-multilingual-cased')
bert_model.eval()
bert_emb = bert_model.embeddings.word_embeddings
embs = {}
chunk_size = 1024
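        # Embeddings are computed lazily one dask chunk at a time, so the
        # whole corpus never has to be materialized in memory at once.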
for lang in toks:
max_length = len(toks[lang][0])
toks[lang] = dask.array.from_array(
np.array(toks[lang], dtype=np.int32),
chunks=(chunk_size, max_length))
logger.info(f'Calculating embeddings for language {lang}.')
embs[lang] = toks[lang].map_blocks(
apply_emb,
chunks=(chunk_size, max_length, self.emb_size),
dtype=np.float32,
new_axis=[2])
self.bos_emb = np.array(
bert_emb(torch.tensor([self.bos])).data[0], dtype=np.float32)
self.eos_emb = np.array(
bert_emb(torch.tensor([self.eos])).data[0], dtype=np.float32)
self.pad_emb = np.array(
bert_emb(torch.tensor([self.pad])).data[0], dtype=np.float32)
return embs
def _save(self, embs, valid_size):
"""Saves the dask arrays in `embs` to HDF5 files."""
h5path = os.path.join(self.data_dir, self.name)
h5files = []
for lang in embs:
h5file = f'{h5path}/{lang}.h5'
h5files.append(h5file)
embs[lang][:-valid_size].to_hdf5(h5file, 'train')
embs[lang][-valid_size:].to_hdf5(h5file, 'valid')
            with h5py.File(h5file, 'r+') as f:  # 'w' would truncate the file just written by to_hdf5
f['train'].attrs['dtype'] = 'float32'
f['valid'].attrs['dtype'] = 'float32'
return h5files
def create(self, datafiles, max_size=None, valid_size=0, use_cache=False):
"""Creates train and validation datasets from files `datafiles`."""
out_path, langs = self._clean(datafiles, max_size, use_cache)
        toks, _ = self._encode(out_path, langs, use_cache)
if self.shuffle:
toks = self._shuffle(toks)
embs = self._embed(toks)
# Save the datasets to an hdf5 file on disk.
return self._save(embs, valid_size)
class LowResolutionEmbeddingCorpus(EmbeddingCorpus):
"""
This is a corpus of BERT embedding vectors which have been averaged
by a sliding window of size `window_size` moving `window_step` tokens
each step.
The EOS and PAD tokens are preserved *without* averaging.
Presumably any model training on this dataset would use MSE as the
loss function.
"""
def __init__(self,
name,
window_step=2,
window_size=2,
shuffle=True,
max_length=200):
super().__init__(name, shuffle, max_length)
self.window_step = window_step
self.window_size = window_size
def _avg_embs(self, embs, lengths):
"""
Averages the embeddings of `embs` which represent sentences with
lengths given by `lengths`.
"""
def eos_and_pad(emb):
"""
Restore EOS marker and PAD tokens after it. This is called by
`apply_along_axis()`.
            This must be a subfunction of `_avg_embs()` so that it has access
            to `max_len`, `lengths`, `eos_emb` and `pad_emb`.
"""
n = int(round(emb[0]))
row = n // max_len
col = n % max_len
if row >= len(lengths):
return emb
elif lengths[row] == col - 1:
return eos_emb
elif (col - 1 > lengths[row]
and col <= lengths[row] + self.window_size):
return pad_emb
return emb
        logger.info('Calculating average embeddings.')
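        # Keep the BOS embedding verbatim, then replace the remaining
        # positions with means over a sliding window of `window_size`
        # embeddings advanced by `window_step` tokens.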
bos = (self.bos_emb.reshape((1, 1, self.emb_size))
.repeat(embs.shape[0], axis=0))
avg_embs = dask.array.concatenate(
[bos] + [
embs[:, i:i + self.window_size, :].mean(
axis=1, keepdims=True)
for i in range(1, embs.shape[1], self.window_step)
], axis=1).astype(np.float32)
# Add a coordinate to the front of every embedding vector containing
# a number that determines the sentence and token of the vector.
# This is the only way to get that info to `eos_and_pad`.
eos_emb = np.concatenate([[-1], self.eos_emb])
pad_emb = np.concatenate([[-1], self.pad_emb])
max_len = int(avg_embs.shape[1])
indices = dask.array.arange(avg_embs.shape[0] * max_len)
indices = indices.reshape((avg_embs.shape[0], max_len, 1))
avg_embs = dask.array.concatenate([indices, avg_embs], axis=2)
avg_embs = avg_embs.rechunk((1024, max_len, len(eos_emb)))
# The dask version of `apply_along_axis()` is broken or does not behave
# like the numpy version, so we have to use `map_blocks()`.
logger.info('Fixing EOS and PAD tokens.')
avg_embs = avg_embs.map_blocks(
lambda b: np.apply_along_axis(eos_and_pad, 2, b),
chunks=(1024, max_len, len(eos_emb)),
dtype=np.float32)
# Drop indices that were stored as first embedding dimension.
avg_embs = avg_embs[:, :, 1:]
return avg_embs
def _save(self, avg_embs, valid_size):
"""
Saves the dask arrays containing averaged embeddings to HDF5 files.
"""
h5path = os.path.join(self.data_dir, self.name)
h5files = []
for lang in avg_embs:
logger.info(f'Saving HDF5 file for language {lang}.')
h5file = f'{h5path}/{lang}.h5'
h5files.append(h5file)
logging.info(f'Saving {lang} dataset to {h5file}')
with h5py.File(h5file, 'w') as f_out:
                # Use `store()`: `to_hdf5` produced an empty file for some reason.
train = avg_embs[lang][:-valid_size]
train_ds = f_out.require_dataset(
'train', shape=train.shape, dtype=train.dtype)
train_ds.attrs['dtype'] = 'float32'
dask.array.store(train, train_ds)
valid = avg_embs[lang][-valid_size:]
valid_ds = f_out.require_dataset(
'valid', shape=valid.shape, dtype=valid.dtype)
valid_ds.attrs['dtype'] = 'float32'
                dask.array.store(valid, valid_ds)
        return h5files
def create(self, datafiles, max_size=None, valid_size=0, use_cache=False):
"""Creates train and validation datasets from files `datafiles`."""
out_path, langs = self._clean(datafiles, max_size, use_cache)
        toks, lens = self._encode(out_path, langs, use_cache)
if self.shuffle:
toks, lens = self._shuffle_with_lens(toks, lens)
embs = self._embed(toks)
avg_embs = {}
for lang in langs:
avg_embs[lang] = self._avg_embs(embs[lang], lens[lang])
        return self._save(avg_embs, valid_size) | src/corpus.py | 0.65379 | 0.172416 |
import argparse
import shutil
import sys
import tempfile
from datetime import datetime
import yaml
import socket
import os
from destinations.Destination import Destination
from Target import Target
class BackupContext:
def __init__(self, argparse_callback):
def _load_yaml(filename, required=True):
try:
with open(filename) as config_file:
                    return yaml.safe_load(config_file)  # safe_load: config files need no arbitrary objects
except IOError:
print >> sys.stderr, "Could not find file %s" % filename
if required:
sys.exit(3)
                else:
                    return {}  # missing optional file behaves like an empty config
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--backups", required=False, default='/etc/der/backup.yml')
parser.add_argument("-c", "--credentials", required=False, default='/etc/der/credentials.yml')
parser.add_argument("-d", "--destinations", required=False, default='/etc/der/destinations.yml')
argparse_callback(parser)
self.args = parser.parse_args()
self.config = _load_yaml(self.args.backups)
self.credentials = _load_yaml(self.args.credentials, required=False)
self.destinations = _load_yaml(self.args.destinations)
self.now = datetime.now()
self.host = socket.gethostname()
self.temp_dir = None
self.temp_dir_delete = False
self.dry_run = False
def get_parser_argument(self, arg, default=None):
if default is None:
return getattr(self.args, arg)
else:
return getattr(self.args, arg, default)
def get_targets(self, specified_target_name):
if specified_target_name == 'all':
result = []
for target_name, target in self.config.iteritems():
result.append(Target(self, target_name, target))
return result
else:
if specified_target_name not in self.config:
return []
else:
return [Target(self, specified_target_name, self.config[specified_target_name])]
def get_destinations(self, specified_destination_id):
if specified_destination_id in self.destinations:
return [Destination.create(self, specified_destination_id, self.destinations[specified_destination_id])]
else:
return []
def set_temp_dir(self, temp_dir=None, delete=True):
if temp_dir is None:
self.temp_dir = tempfile.mkdtemp()
else:
self.temp_dir = unicode(os.path.abspath(temp_dir), encoding='utf8')
self.temp_dir_delete = delete
self.log("using temporary dir {}; delete it: {}".format(self.temp_dir, self.temp_dir_delete))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
if self.temp_dir_delete:
shutil.rmtree(self.temp_dir, ignore_errors=True)
def log(self, message): # TODO add prefix, datetime
print message
def log_error(self, message): # TODO add prefix, datetime
print >> sys.stderr, message
def temp_path(self, local_path):
return os.path.join(self.temp_dir, local_path) | root/opt/der/lib/backup/BackupContext.py | 0.158891 | 0.062417 |
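A minimal usage sketch for the BackupContext above (Python 2, to match the file). The `--target` flag and its handling are illustrative assumptions; real callers supply their own `argparse_callback`:

```python
# Usage sketch (Python 2, matching the file above). The --target flag is an
# illustrative assumption added through the argparse callback.
def add_args(parser):
    parser.add_argument("-t", "--target", default="all")

with BackupContext(add_args) as ctx:
    ctx.set_temp_dir()  # mkdtemp(); removed on exit since delete=True
    for target in ctx.get_targets(ctx.get_parser_argument("target")):
        ctx.log("backing up %s" % target)
```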
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from time import sleep;
#Test component to be tested
sysObj = tdklib.TDKScriptingLibrary("sysutil","RDKB");
#IP and Port of box, No need to change,
#This will be replaced with corresponding DUT Ip and port while executing script
ip = <ipaddress>
port = <port>
sysObj.configureTestCase(ip,port,'TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_1_Values');
#Get the result of connection with test component and DUT
sysutilloadmodulestatus=sysObj.getLoadModuleResult();
if "SUCCESS" in sysutilloadmodulestatus.upper():
#Set the result status of execution
sysObj.setLoadModuleStatus("SUCCESS");
#Check if wifihealth.txt file is present
step = 1;
tdkTestObj = sysObj.createTestStep('ExecuteCmd');
cmd = "[ -f /rdklogs/logs/wifihealth.txt ] && echo \"File exist\" || echo \"File does not exist\"";
tdkTestObj.addParameter("command",cmd);
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
print "\nTEST STEP %d: Check for wifihealth log file presence" %step;
print "EXPECTED RESULT %d:wifihealth log file should be present" %step;
if details == "File exist":
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d:wifihealth log file is present" %step;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
#Check for the marker CHUTIL_1
step = step + 1;
tdkTestObj = sysObj.createTestStep('ExecuteCmd');
cmd = "grep -ire \"CHUTIL_1\" /rdklogs/logs/wifihealth.txt";
tdkTestObj.addParameter("command",cmd);
expectedresult="SUCCESS";
print "\nTEST STEP %d: Check for the presence of the marker CHUTIL_1" %step;
print "EXPECTED RESULT %d: CHUTIL_1 marker should be present" %step;
markerfound = 0;
#Giving 15 iterations of 60s each as the default value of Channel Utility Log Interval is 900s
for iteration in range(1,16):
print "Waiting for the marker to get populated in wifihealth.txt....\nIteration : %d" %iteration;
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in actualresult and "CHUTIL_1" in details:
markerfound = 1;
break;
else:
sleep(60);
continue;
if markerfound == 1:
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d: CHUTIL_1 marker is found; Details : %s" %(step,details);
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
#Get the values for <CU due to self BSS from transmitted traffic>,<CU due to self BSS from received traffic >,<CU due to overlapping BSS>
marker_list = details.split("split:")[1].split(",");
print "Marker_list : ", marker_list;
step = step + 1;
print "\nTEST STEP %d : Get the values for CU due to self BSS from transmitted traffic, CU due to self BSS from received traffic and CU due to overlapping BSS" %step;
print "EXPECTED RESULT %d : Should get the values for CU due to self BSS from transmitted traffic, CU due to self BSS from received traffic and CU due to overlapping BSS" %step;
if marker_list[0].strip() != "" and marker_list[1].strip() != "" and marker_list[2].strip() != "":
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d: The marker values are retrieved successfully" %(step);
print "CU due to self BSS from transmitted traffic : %s" %marker_list[0];
print "CU due to self BSS from received traffic : %s" %marker_list[1];
print "CU due to overlapping BSS : %s" %marker_list[2];
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else :
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d: The marker values are not retrieved successfully" %(step);
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d: CHUTIL_1 marker is not found; Details : %s" %(step,details);
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d:wifihealth log file is not present" %step;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
sysObj.unloadModule("sysutil");
else:
print "Failed to load module";
sysObj.setLoadModuleStatus("FAILURE");
print "Module loading failed"; | testscripts/RDKB/component/WIFIAgent/TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_1_Values.py | # use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from time import sleep;
#Test component to be tested
sysObj = tdklib.TDKScriptingLibrary("sysutil","RDKB");
#IP and Port of box, No need to change,
#This will be replaced with corresponding DUT Ip and port while executing script
ip = <ipaddress>
port = <port>
sysObj.configureTestCase(ip,port,'TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_1_Values');
#Get the result of connection with test component and DUT
sysutilloadmodulestatus=sysObj.getLoadModuleResult();
if "SUCCESS" in sysutilloadmodulestatus.upper():
#Set the result status of execution
sysObj.setLoadModuleStatus("SUCCESS");
#Check if wifihealth.txt file is present
step = 1;
tdkTestObj = sysObj.createTestStep('ExecuteCmd');
cmd = "[ -f /rdklogs/logs/wifihealth.txt ] && echo \"File exist\" || echo \"File does not exist\"";
tdkTestObj.addParameter("command",cmd);
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
print "\nTEST STEP %d: Check for wifihealth log file presence" %step;
print "EXPECTED RESULT %d:wifihealth log file should be present" %step;
if details == "File exist":
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d:wifihealth log file is present" %step;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
#Check for the maraker CHUTIL_1
step = step + 1;
tdkTestObj = sysObj.createTestStep('ExecuteCmd');
cmd = "grep -ire \"CHUTIL_1\" /rdklogs/logs/wifihealth.txt";
tdkTestObj.addParameter("command",cmd);
expectedresult="SUCCESS";
print "\nTEST STEP %d: Check for the presence of the marker CHUTIL_1" %step;
print "EXPECTED RESULT %d: CHUTIL_1 marker should be present" %step;
markerfound = 0;
#Giving 15 iterations of 60s each as the default value of Channel Utility Log Interval is 900s
for iteration in range(1,16):
print "Waiting for the marker to get populated in wifihealth.txt....\nIteration : %d" %iteration;
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in actualresult and "CHUTIL_1" in details:
markerfound = 1;
break;
else:
sleep(60);
continue;
if markerfound == 1:
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d: CHUTIL_1 marker is found; Details : %s" %(step,details);
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
#Get the values for <CU due to self BSS from transmitted traffic>,<CU due to self BSS from received traffic >,<CU due to overlapping BSS>
marker_list = details.split("split:")[1].split(",");
print "Marker_list : ", marker_list;
step = step + 1;
print "\nTEST STEP %d : Get the values for CU due to self BSS from transmitted traffic, CU due to self BSS from received traffic and CU due to overlapping BSS" %step;
print "EXPECTED RESULT %d : Should get the values for CU due to self BSS from transmitted traffic, CU due to self BSS from received traffic and CU due to overlapping BSS" %step;
if marker_list[0] != "" or marker_list[1] != "" or marker_list[2] != "" or marker_list[0] != " " or marker_list[1] != " " or marker_list[2] != " ":
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d: The marker values are retrieved successfully" %(step);
print "CU due to self BSS from transmitted traffic : %s" %marker_list[0];
print "CU due to self BSS from received traffic : %s" %marker_list[1];
print "CU due to overlapping BSS : %s" %marker_list[2];
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else :
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d: The marker values are not retrieved successfully" %(step);
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d: CHUTIL_1 marker is not found; Details : %s" %(step,details);
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d:wifihealth log file is not present" %step;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
sysObj.unloadModule("sysutil");
else:
print "Failed to load module";
sysObj.setLoadModuleStatus("FAILURE");
print "Module loading failed"; | 0.269422 | 0.207094 |
import numpy as np
from tabulate import tabulate
from artemis.general.dead_easy_ui import DeadEasyUI
class TableExplorerUI(DeadEasyUI):
def __init__(self, table_data, col_headers=None, row_headers=None, col_indices=None, row_indices = None):
assert all(len(r)==len(table_data[0]) for r in table_data), "All rows of table data must have the same length. Got lengths: {}".format([len(r) for r in table_data])
table_data = np.array(table_data, dtype=object)
assert table_data.ndim==2, "Table must consist of 2d data"
assert col_headers is None or len(col_headers)==table_data.shape[1]
assert row_headers is None or len(row_headers)==table_data.shape[0]
self._table_data = table_data
self._col_indices = np.array(col_indices) if col_indices is not None else None
self._row_indices = np.array(row_indices) if row_indices is not None else None
self._col_headers = np.array(col_headers) if col_headers is not None else None
self._row_headers = np.array(row_headers) if row_headers is not None else None
self._old_data_buffer = []
@property
def n_rows(self):
return self._table_data.shape[0]
@property
def n_cols(self):
return self._table_data.shape[1]
def _get_full_table(self):
n_total_rows = 1 + int(self._col_headers is not None) + self._table_data.shape[0]
n_total_cols = 1 + int(self._row_headers is not None) + self._table_data.shape[1]
table_data = np.empty((n_total_rows, n_total_cols), dtype=object)
table_data[:2, :2] = ''
table_data[0, -self.n_cols:] = self._col_indices if self._col_indices is not None else ['{}'.format(i) for i in range(1, self.n_cols+1)]
table_data[-self.n_rows:, 0] = self._row_indices if self._row_indices is not None else ['{}'.format(i) for i in range(1, self.n_rows+1)]
if self._col_headers is not None:
table_data[1, -self.n_cols:] = self._col_headers
if self._row_headers is not None:
table_data[-self.n_rows:, 1] = self._row_headers
table_data[-self.n_rows:, -self.n_cols:] = self._table_data
return table_data
def _get_menu_string(self):
table_str = tabulate(self._get_full_table())
return '{}\n'.format(table_str)
def _backup(self):
self._old_data_buffer.append((self._table_data, self._row_headers, self._row_indices, self._col_headers, self._col_indices))
def undo(self):
if len(self._old_data_buffer)==0:
print("Can't undo, no history")
else:
self._table_data, self._row_headers, self._row_indices, self._col_headers, self._col_indices = self._old_data_buffer.pop()
def _parse_indices(self, user_range):
if isinstance(user_range, str):
user_range = user_range.split(',')
return [int(i)-1 for i in user_range]
def _reindex(self, row_ixs=None, col_ixs=None):
self._backup()
if row_ixs is not None:
self._table_data = self._table_data[row_ixs, :]
if self._row_headers is not None:
self._row_headers = self._row_headers[row_ixs]
if self._row_indices is not None:
self._row_indices = self._row_indices[row_ixs]
if col_ixs is not None:
self._table_data = self._table_data[:, col_ixs]
if self._col_headers is not None:
self._col_headers = self._col_headers[col_ixs]
if self._col_indices is not None:
self._col_indices = self._col_indices[col_ixs]
def delcol(self, user_range):
self._reindex(col_ixs=[i for i in range(self.n_cols) if i not in self._parse_indices(user_range)])
def delrow(self, user_range):
self._reindex(row_ixs=[i for i in range(self.n_rows) if i not in self._parse_indices(user_range)])
def shufrows(self, user_range):
indices = self._parse_indices(user_range)
self._reindex(row_ixs=indices + [i for i in range(self.n_rows) if i not in indices])
def shufcols(self, user_range):
indices = self._parse_indices(user_range)
self._reindex(col_ixs=indices + [i for i in range(self.n_cols) if i not in indices])
def sortrows(self, by_cols=None, shuffle_cols=True):
key_order_indices = self._parse_indices(by_cols) if by_cols is not None else range(self.n_cols)
sorting_data = self._table_data[:, key_order_indices[::-1]].copy()
for col in range(sorting_data.shape[1]):
if np.mean([np.isreal(x) for x in sorting_data[:, col]]) % 1 != 0:  # A fractional mean means the column mixes numeric and non-numeric entries
sorting_data[:, col] = [(not np.isreal(x), x) for x in sorting_data[:, col]]
indices = np.lexsort(sorting_data.T)
self._reindex(row_ixs=indices)
if shuffle_cols:
self.shufcols(by_cols)
def sortcols(self, by_rows=None, shuffle_rows=True):
key_order_indices = self._parse_indices(by_rows) if by_rows is not None else range(self.n_rows)
indices = np.lexsort(self._table_data[key_order_indices[::-1], :])
self._reindex(col_ixs=indices)
if shuffle_rows:
self.shufrows(by_rows)
if __name__ == '__main__':
ui = TableExplorerUI(
col_headers=['param1', 'size', 'cost'],
row_headers=['exp1', 'exp2', 'exp3'],
table_data= [[4, 'Bella', 100], [3, 'Abe', 120], [4, 'Clarence', 117]],
)
ui.launch() | artemis/general/table_ui.py | 0.563858 | 0.404037 |
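`sortrows` above leans on `np.lexsort`, which treats its last key as the primary one, hence the `[::-1]` reversal of the key columns. A tiny sketch of that behaviour:

```python
# Sketch: np.lexsort sorts by its *last* key first, which is why sortrows
# above reverses the user-given key columns with [::-1].
import numpy as np

primary = np.array([2, 1, 2, 1])
secondary = np.array([9, 8, 7, 6])
order = np.lexsort((secondary, primary))  # primary key wins, ties fall to secondary
print(order)  # -> [3 1 2 0]
```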
import argparse
import sys
import json
from decimal import Decimal
import ijson
import requests
def parse_args():
parser = argparse.ArgumentParser(description='Load JSON documents into a CouchDB database')
parser.add_argument('-H', '--host', dest='host', default='localhost')
parser.add_argument('-P', dest='port', type=int, default=5984)
parser.add_argument('-u', dest='username')
parser.add_argument('-p', dest='password')
parser.add_argument('-b', dest='bulk_size', type=int, default=1000)
parser.add_argument('target_database', help='database to import data to')
return parser.parse_args()
def default(obj):
if isinstance(obj, Decimal):
return float(obj)
raise TypeError("Object of type '%s' is not JSON serializable" % type(obj).__name__)
def load(args, input_file, bulk_size=1000):
session = requests.session()
bulk = []
for doc in ijson.items(input_file, 'docs.item'):
bulk.append(doc)
if len(bulk) == bulk_size:
bulk_insert(args, session, bulk)
bulk = []
if bulk:
bulk_insert(args, session, bulk)
def bulk_insert(args, session, docs):
url = 'http://{}:{:d}/{}/_bulk_docs'.format(args.host, args.port, args.target_database)
auth = (args.username, args.password or '') if args.username else None
payload = json.dumps({'docs': docs}, separators=(',', ':'), default=default)
response = session.post(url, data=payload, auth=auth, headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Couch-Full-Commit': 'true'
})
if response.status_code == 201:
results = response.json()
for result in results:
if result.get('error'):
message = 'Failed to import document {}: {}. Reason: {}\n'.format(
result['id'], result['error'], result['reason'])
sys.stderr.write(message)
sys.exit(1)
else:
sys.stderr.write('Failed to import documents into {}\n'.format(args.target_database))
sys.exit(1)
def main():
args = parse_args()
load(args, sys.stdin, bulk_size=args.bulk_size)
main() | couchdb_load.py | 0.285372 | 0.085709 |
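`load()` above streams documents with `ijson.items`, so the whole dump never has to fit in memory. A self-contained sketch with an invented JSON body:

```python
# Sketch: stream documents out of a JSON body with ijson, as load() does.
# The JSON payload here is invented.
import io
import ijson

body = io.BytesIO(b'{"docs": [{"_id": "a"}, {"_id": "b"}, {"_id": "c"}]}')
for doc in ijson.items(body, 'docs.item'):
    print(doc['_id'])
```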
import os
import pdb
import sys
sys.path[0] = os.getcwd()
import cv2
import yaml
import argparse
from PIL import Image
from glob import glob
from os.path import exists, join
from easydict import EasyDict as edict
import torch
import numpy as np
import tracker.sot.lib.models as models
from tracker.sot.lib.utils.utils import load_dataset, crop_chw, \
gaussian_shaped_labels, cxy_wh_2_rect1, rect1_2_cxy_wh, cxy_wh_2_bbox
from tracker.sot.lib.core.eval_otb import eval_auc_tune
import utils
from model import AppearanceModel, partial_load
from data.vos import color_normalize, load_image, im_to_numpy, im_to_torch
def get_frames(video_name):
if not video_name:
cap = cv2.VideoCapture(0)
# warmup
for i in range(5):
cap.read()
while True:
ret, frame = cap.read()
if ret:
yield frame
else:
break
elif video_name.endswith('avi') or video_name.endswith('mp4'):
cap = cv2.VideoCapture(video_name)
while True:
ret, frame = cap.read()
if ret:
yield frame
else:
break
else:
images = glob(os.path.join(video_name, '*.jp*'))
images = sorted(images,
key=lambda x: int(x.split('/')[-1].split('.')[0]))
for img in images:
frame = cv2.imread(img)
yield frame
def preproc(img, im_mean, im_std, use_lab=False):
img = load_image(img)
if use_lab:
img = im_to_numpy(img)
img = (img*255).astype(np.uint8)[:, :, ::-1]
img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
img = im_to_torch(img) / 255.
img = color_normalize(img, im_mean, im_std)
if use_lab:
img = torch.stack([img[0], ]*3)
img = img.permute(1, 2, 0).numpy() # H, W, C
return img
class TrackerConfig(object):
crop_sz = 512 + 8
downscale = 8
temp_sz = crop_sz // downscale
lambda0 = 1e-4
padding = 3.5
interp_factor = 0.01
num_scale = 3
scale_step = 1.0275
scale_factor = scale_step ** (np.arange(num_scale) - num_scale // 2)
min_scale_factor = 0.2
max_scale_factor = 5
scale_penalty = 0.985
scale_penalties = scale_penalty ** (np.abs((np.arange(num_scale) - num_scale // 2)))
net_output_size = [temp_sz, temp_sz]
cos_window = torch.Tensor(np.outer(np.hanning(temp_sz), np.hanning(temp_sz))).cuda()
def track(net, args):
toc = 0
config = TrackerConfig()
video_name = os.path.basename(args.input) if args.input else 'webcam'
regions = [] # FINAL RESULTS
for f, img_raw in enumerate(get_frames(args.input)):
img_raw = cv2.resize(img_raw, (640,480))
use_lab = getattr(args, 'use_lab', False)
im = preproc(img_raw, args.im_mean, args.im_std, use_lab)
tic = cv2.getTickCount()
# Init
if f == 0:
try:
init_rect = cv2.selectROI(video_name, img_raw, False, False)
except Exception:
exit()
target_pos, target_sz = rect1_2_cxy_wh(init_rect)
min_sz = np.maximum(config.min_scale_factor * target_sz, 4)
max_sz = np.minimum(im.shape[:2], config.max_scale_factor * target_sz)
# crop template
window_sz = target_sz * (1 + config.padding)
bbox = cxy_wh_2_bbox(target_pos, window_sz)
patch = crop_chw(im, bbox, config.crop_sz)
target = patch
net.update(torch.Tensor(np.expand_dims(target, axis=0)).cuda(), lr=1)
regions.append(cxy_wh_2_rect1(target_pos, target_sz))
patch_crop = np.zeros((config.num_scale, patch.shape[0],
patch.shape[1], patch.shape[2]), np.float32)
# Track
else:
for i in range(config.num_scale): # crop multi-scale search region
window_sz = target_sz * (config.scale_factor[i] * (1 + config.padding))
bbox = cxy_wh_2_bbox(target_pos, window_sz)
patch_crop[i, :] = crop_chw(im, bbox, config.crop_sz)
search = patch_crop
response = net(torch.Tensor(search).cuda())
net_output_size = [response.shape[-2], response.shape[-1]]
peak, idx = torch.max(response.view(config.num_scale, -1), 1)
peak = peak.data.cpu().numpy() * config.scale_penalties
best_scale = np.argmax(peak)
r_max, c_max = np.unravel_index(idx[best_scale].cpu(), net_output_size)
r_max = r_max - net_output_size[0] * 0.5
c_max = c_max - net_output_size[1] * 0.5
window_sz = target_sz * (config.scale_factor[best_scale] * (1 + config.padding))
target_pos = target_pos + np.array([c_max, r_max]) * window_sz / net_output_size
target_sz = np.minimum(np.maximum(window_sz / (1 + config.padding), min_sz), max_sz)
# model update
window_sz = target_sz * (1 + config.padding)
bbox = cxy_wh_2_bbox(target_pos, window_sz)
patch = crop_chw(im, bbox, config.crop_sz)
target = patch
regions.append(cxy_wh_2_rect1(target_pos, target_sz)) # 1-index
toc += cv2.getTickCount() - tic
bbox = list(map(int, regions[-1]))
cv2.rectangle(img_raw, (bbox[0], bbox[1]),
(bbox[0]+bbox[2], bbox[1]+bbox[3]), (0, 255, 0), 3)
cv2.imshow(video_name, img_raw)
cv2.waitKey(40)
toc /= cv2.getTickFrequency()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='', required=True, type=str)
parser.add_argument('--input', required=True, type=str)
args = parser.parse_args()
with open(args.config) as f:
common_args = yaml.safe_load(f)
for k, v in common_args['common'].items():
setattr(args, k, v)
for k, v in common_args['sot'].items():
setattr(args, k, v)
args.arch = 'SiamFC'
# prepare model
base = AppearanceModel(args).to(args.device)
print('Total params: %.2fM' %
(sum(p.numel() for p in base.parameters())/1e6))
print(base)
net = models.__dict__[args.arch](base=base, config=TrackerConfig())
net.eval()
net = net.cuda()
track(net, args)
if __name__ == '__main__':
main() | demo/sot_demo.py | 0.250729 | 0.136752 |
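The tracking loop above picks the best scale by taking each response map's peak, applying the scale penalty, and unravelling the winning index. A numpy-only sketch of that selection with made-up sizes:

```python
# Sketch: scale selection as in track() above: peak per response map,
# scale penalty, then unravel the winning location. Sizes are made up.
import numpy as np

num_scale, h, w = 3, 64, 64
response = np.random.rand(num_scale, h, w).astype(np.float32)
scale_penalties = 0.985 ** np.abs(np.arange(num_scale) - num_scale // 2)

flat = response.reshape(num_scale, -1)
peak = flat.max(axis=1) * scale_penalties
best = int(np.argmax(peak))
r_max, c_max = np.unravel_index(flat[best].argmax(), (h, w))
```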
import logging
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
from homeassistant.const import (
ATTR_TEMPERATURE
)
from . import DOMAIN, SIGNAL_STATE_UPDATED
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from .const import (NAME, VERSION, MANUFACTURER)
from homeassistant.components.climate import (
ClimateEntity)
from homeassistant.components.climate.const import (
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_FAN_MODE,
HVAC_MODE_HEAT, HVAC_MODE_FAN_ONLY,
CURRENT_HVAC_OFF, CURRENT_HVAC_HEAT, CURRENT_HVAC_FAN)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
TARGET_TEMPERATURE_STEP = 1
ATTR_OUTSIDE_TEMP = 'outside'
ATTR_INSIDE_TEMP = 'inside'
ATTR_EXHAUST_TEMP = 'exhaust'
ATTR_INCOMING_TEMP = 'incoming'
_LOGGER = logging.getLogger(__name__)
ha_to_me = {HVAC_MODE_HEAT: 'HEAT', HVAC_MODE_FAN_ONLY: 'FAN'}
me_to_ha = {v: k for k, v in ha_to_me.items()}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor platform."""
add_entities([ValloxDigitClimate(hass)])
async def async_setup_entry(hass, entry, async_add_devices):
"""Setup sensor platform."""
async_add_devices([
ValloxDigitClimate(hass, entry)
])
class ValloxDigitClimate(ClimateEntity):
"""Representation of a sensor."""
def __init__(self, hass, entry):
"""Initialize the sensor."""
self._entry = entry
self._vallox2mqtt = hass.data[DOMAIN][entry.entry_id]
self._state = None
self._config_entry = entry
async def update_data(self):
"""Fetch new state data for the sensor.
This is the only method that should fetch new data for Home Assistant.
"""
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Handle being added to home assistant."""
await super().async_added_to_hass()
self.async_on_remove(
async_dispatcher_connect(self.hass, SIGNAL_STATE_UPDATED, self.update_data)
)
@property
def unique_id(self):
"""Return a unique ID to use for this entity."""
return f"{DOMAIN}_climate"
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def target_temperature_step(self):
"""Return the target temperature step."""
return TARGET_TEMPERATURE_STEP
@property
def should_poll(self):
"""Polling not needed for a demo climate device."""
return False
@property
def name(self):
"""Return the name of the climate device."""
return self._vallox2mqtt._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._vallox2mqtt._target_temperature
@property
def current_temperature(self):
"""Return the current temperature."""
return self._vallox2mqtt._current_temperature
@property
def fan_mode(self):
"""Return the fan setting."""
if self._vallox2mqtt._fan_mode is None:
return
return self._vallox2mqtt._fan_mode.capitalize()
@property
def fan_modes(self):
"""List of available fan modes."""
return [k.capitalize() for k in self._vallox2mqtt._fan_modes]
@property
def hvac_action(self):
if self._vallox2mqtt._hvac_mode == 'FAN':
return CURRENT_HVAC_FAN
if self._vallox2mqtt._hvac_mode == 'HEAT':
return CURRENT_HVAC_HEAT
@property
def available(self):
"""Flag to inform availability"""
return True
@property
def hvac_mode(self):
"""Return current operation (heat, fan)"""
return me_to_ha[self._vallox2mqtt._hvac_mode]
@property
def hvac_modes(self):
"""List of available operation modes."""
return [me_to_ha[k] for k in self._vallox2mqtt._hvac_modes]
@property
def device_info(self):
return {
"identifiers": {(DOMAIN, self._config_entry.entry_id)},
"name": NAME,
"model": VERSION,
"manufacturer": MANUFACTURER
}
async def async_set_temperature(self, **kwargs):
"""Set new target temperatures."""
if kwargs.get(ATTR_TEMPERATURE) is not None:
# This is also set via the MQTT callback
self._vallox2mqtt._target_temperature = kwargs.get(ATTR_TEMPERATURE)
self._vallox2mqtt._publish_temperature()
self.async_write_ha_state()
async def async_set_fan_mode(self, fan_mode):
"""Set new fan mode."""
if fan_mode is not None:
self._vallox2mqtt._fan_mode = fan_mode.upper()
self._vallox2mqtt._publish_fan_mode()
self.async_write_ha_state()
async def async_set_hvac_mode(self, hvac_mode):
"""Set new operating mode."""
if hvac_mode is not None:
self._vallox2mqtt._hvac_mode = ha_to_me[hvac_mode]
self._vallox2mqtt._publish_hvac_mode()
self.async_write_ha_state() | custom_components/vallox2mqtt/climate.py | 0.812161 | 0.147524 |
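`SUPPORT_FLAGS` above is a bitmask, so individual capabilities are combined with `|` and tested with `&`. A minimal sketch of the pattern; the constant values mirror the ones in `homeassistant.components.climate.const` at the time this integration was written (assumed, version-dependent):

```python
# Sketch: capability bitmask pattern used by SUPPORT_FLAGS above. The values
# mirror homeassistant.components.climate.const (assumed, version-dependent).
SUPPORT_TARGET_TEMPERATURE = 1
SUPPORT_FAN_MODE = 8

flags = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
assert flags & SUPPORT_FAN_MODE  # fan-mode capability present
assert not flags & 2             # target-temperature-range bit unset
```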
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class CorporationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_corporations_corporation_id(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation information # noqa: E501
Public information about a corporation --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation information # noqa: E501
Public information about a corporation --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v4/corporations/{corporation_id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCorporationsCorporationIdOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_alliancehistory(self, corporation_id, **kwargs): # noqa: E501
"""Get alliance history # noqa: E501
Get a list of all the alliances a corporation has been a member of --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_alliancehistory(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdAlliancehistory200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_alliancehistory_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_alliancehistory_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_alliancehistory_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get alliance history # noqa: E501
Get a list of all the alliances a corporation has been a member of --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_alliancehistory_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdAlliancehistory200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_alliancehistory" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_alliancehistory`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_alliancehistory`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v2/corporations/{corporation_id}/alliancehistory/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdAlliancehistory200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_blueprints(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation blueprints # noqa: E501
Returns a list of blueprints the corporation owns --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_blueprints(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdBlueprints200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_blueprints_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_blueprints_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_blueprints_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation blueprints # noqa: E501
Returns a list of blueprints the corporation owns --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_blueprints_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdBlueprints200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_blueprints" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_blueprints`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_blueprints`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/corporations/{corporation_id}/blueprints/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdBlueprints200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_containers_logs(self, corporation_id, **kwargs): # noqa: E501
"""Get all corporation ALSC logs # noqa: E501
Returns logs recorded in the past seven days from all audit log secure containers (ALSC) owned by a given corporation --- This route is cached for up to 600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_containers_logs(corporation_id, async=True)
>>> result = thread.get()
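Synchronous usage sketch, with the same placeholder assumptions about `api`
and the corporation ID as elsewhere in this class:
>>> logs = api.get_corporations_corporation_id_containers_logs(98000001, page=1)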
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdContainersLogs200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_containers_logs_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_containers_logs_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_containers_logs_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get all corporation ALSC logs # noqa: E501
Returns logs recorded in the past seven days from all audit log secure containers (ALSC) owned by a given corporation --- This route is cached for up to 600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_containers_logs_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdContainersLogs200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_containers_logs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_containers_logs`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_containers_logs`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/corporations/{corporation_id}/containers/logs/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdContainersLogs200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_divisions(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation divisions # noqa: E501
Return corporation hangar and wallet division names; a name is only returned when the division is not using the default name --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_divisions(corporation_id, async=True)
>>> result = thread.get()
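Illustrative synchronous call (placeholder corporation ID; note this route
returns a single GetCorporationsCorporationIdDivisionsOk model, not a list):
>>> divisions = api.get_corporations_corporation_id_divisions(98000001)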
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdDivisionsOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_divisions_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_divisions_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_divisions_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation divisions # noqa: E501
Return corporation hangar and wallet division names; a name is only returned when the division is not using the default name --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_divisions_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdDivisionsOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_divisions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_divisions`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_divisions`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/divisions/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCorporationsCorporationIdDivisionsOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_facilities(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation facilities # noqa: E501
Return a corporation's facilities --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Factory_Manager # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_facilities(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdFacilities200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_facilities_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_facilities_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_facilities_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation facilities # noqa: E501
Return a corporation's facilities --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Factory_Manager # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_facilities_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdFacilities200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_facilities" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_facilities`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_facilities`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/facilities/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdFacilities200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_icons(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation icon # noqa: E501
Get the icon URLs for a corporation --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_icons(corporation_id, async=True)
>>> result = thread.get()
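Since this route requires no authentication, a bare synchronous call
suffices (98000001 is a placeholder corporation ID):
>>> icons = api.get_corporations_corporation_id_icons(98000001)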
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdIconsOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_icons_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_icons_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_icons_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation icon # noqa: E501
Get the icon URLs for a corporation --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_icons_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdIconsOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_icons" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_icons`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_icons`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/icons/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCorporationsCorporationIdIconsOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_medals(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation medals # noqa: E501
Returns a corporation's medals --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_medals(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMedals200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_medals_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_medals_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_medals_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation medals # noqa: E501
Returns a corporation's medals --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_medals_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMedals200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_medals" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_medals`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_medals`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/medals/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdMedals200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_medals_issued(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation issued medals # noqa: E501
Returns medals issued by a corporation --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_medals_issued(corporation_id, async=True)
>>> result = thread.get()
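Illustrative synchronous call (98000001 is a placeholder corporation ID):
>>> issued = api.get_corporations_corporation_id_medals_issued(98000001)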
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMedalsIssued200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_medals_issued_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_medals_issued_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_medals_issued_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation issued medals # noqa: E501
Returns medals issued by a corporation --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_medals_issued_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMedalsIssued200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_medals_issued" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_medals_issued`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_medals_issued`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/medals/issued/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdMedalsIssued200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_members(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation members # noqa: E501
Return the current member list of a corporation; the token's character needs to be a member of the corporation. --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_members(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_members_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_members_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_members_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation members # noqa: E501
Return the current member list of a corporation; the token's character needs to be a member of the corporation. --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_members_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_members" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_members`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_members`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v3/corporations/{corporation_id}/members/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[int]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_members_limit(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation member limit # noqa: E501
Return a corporation's member limit, not including the CEO --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_members_limit(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: int
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_members_limit_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_members_limit_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_members_limit_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation member limit # noqa: E501
Return a corporation's member limit, not including the CEO --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_members_limit_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: int
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_members_limit" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_members_limit`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_members_limit`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/members/limit/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='int', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_members_titles(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation's members' titles # noqa: E501
Returns a corporation's members' titles --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_members_titles(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMembersTitles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_members_titles_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_members_titles_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_members_titles_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation's members' titles # noqa: E501
Returns a corporation's members' titles --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_members_titles_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMembersTitles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_members_titles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_members_titles`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_members_titles`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/members/titles/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdMembersTitles200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_membertracking(self, corporation_id, **kwargs): # noqa: E501
"""Track corporation members # noqa: E501
Returns additional information about a corporation's members, which helps with tracking their activities --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_membertracking(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMembertracking200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_membertracking_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_membertracking_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_membertracking_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Track corporation members # noqa: E501
Returns additional information about a corporation's members, which helps with tracking their activities --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_membertracking_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMembertracking200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_membertracking" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_membertracking`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_membertracking`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/membertracking/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdMembertracking200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_outposts(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation outposts # noqa: E501
Get a list of corporation outpost IDs. Note: This endpoint will be removed once outposts are migrated to Citadels, as described in this dev blog: https://community.eveonline.com/news/dev-blogs/the-next-steps-in-structure-transition/ --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_outposts(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_outposts_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_outposts_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_outposts_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation outposts # noqa: E501
Get a list of corporation outpost IDs. Note: This endpoint will be removed once outposts are migrated to Citadels, as described in this dev blog: https://community.eveonline.com/news/dev-blogs/the-next-steps-in-structure-transition/ --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_outposts_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_outposts" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_outposts`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_outposts`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/outposts/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[int]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_outposts_outpost_id(self, corporation_id, outpost_id, **kwargs): # noqa: E501
"""Get corporation outpost details # noqa: E501
Get details about a given outpost. Note: This endpoint will be removed once outposts are migrated to Citadels, as described in this dev blog: https://community.eveonline.com/news/dev-blogs/the-next-steps-in-structure-transition/ --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_outposts_outpost_id(corporation_id, outpost_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param int outpost_id: A station (outpost) ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdOutpostsOutpostIdOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_outposts_outpost_id_with_http_info(corporation_id, outpost_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_outposts_outpost_id_with_http_info(corporation_id, outpost_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_outposts_outpost_id_with_http_info(self, corporation_id, outpost_id, **kwargs): # noqa: E501
"""Get corporation outpost details # noqa: E501
Get details about a given outpost. Note: This endpoint will be removed once outposts are migrated to Citadels, as described in this dev blog: https://community.eveonline.com/news/dev-blogs/the-next-steps-in-structure-transition/ --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_outposts_outpost_id_with_http_info(corporation_id, outpost_id, async=True)
>>> result = thread.get()
:param bool async: Pass True to make the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param int outpost_id: A station (outpost) ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdOutpostsOutpostIdOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'outpost_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_outposts_outpost_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_outposts_outpost_id`") # noqa: E501
# verify the required parameter 'outpost_id' is set
if ('outpost_id' not in params or
params['outpost_id'] is None):
raise ValueError("Missing the required parameter `outpost_id` when calling `get_corporations_corporation_id_outposts_outpost_id`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_outposts_outpost_id`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
if 'outpost_id' in params:
path_params['outpost_id'] = params['outpost_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/outposts/{outpost_id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCorporationsCorporationIdOutpostsOutpostIdOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
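# Sketch of combining the two outpost routes: list the IDs, then fetch each
# outpost's details; the loop and corporation ID are illustrative:
#
# >>> for outpost_id in api.get_corporations_corporation_id_outposts(98000001):
# ...     detail = api.get_corporations_corporation_id_outposts_outpost_id(
# ...         98000001, outpost_id)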
def get_corporations_corporation_id_roles(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation member roles # noqa: E501
Return the roles of all members if the character has the personnel manager role or any grantable role. --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_roles(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdRoles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_roles_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_roles_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_roles_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation member roles # noqa: E501
Return the roles of all members if the character has the personnel manager role or any grantable role. --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_roles_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdRoles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_roles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_roles`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_roles`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/roles/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdRoles200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
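# Usage sketch for the roles route; the attribute names follow the generated
# GetCorporationsCorporationIdRoles200Ok model and are assumptions here:
#
# >>> for member in api.get_corporations_corporation_id_roles(98000001):
# ...     print(member.character_id, member.roles)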
def get_corporations_corporation_id_roles_history(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation member roles history # noqa: E501
Return how roles have changed for a corporation's members, up to a month --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_roles_history(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdRolesHistory200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_roles_history_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_roles_history_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_roles_history_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation member roles history # noqa: E501
Return how roles have changed for a corporation's members, up to a month --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_roles_history_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdRolesHistory200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_roles_history" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_roles_history`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_roles_history`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/roles/history/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdRolesHistory200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
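# The roles history route is paginated via the `page` query parameter; a
# sketch of walking pages until an empty page comes back (the stopping
# condition is an assumption, not something the generated client enforces):
#
# >>> page, history = 1, []
# >>> while True:
# ...     chunk = api.get_corporations_corporation_id_roles_history(
# ...         98000001, page=page)
# ...     if not chunk:
# ...         break
# ...     history.extend(chunk)
# ...     page += 1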
def get_corporations_corporation_id_shareholders(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation shareholders # noqa: E501
Return the current shareholders of a corporation. --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_shareholders(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdShareholders200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_shareholders_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_shareholders_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_shareholders_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation shareholders # noqa: E501
Return the current shareholders of a corporation. --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_shareholders_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdShareholders200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_shareholders" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_shareholders`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_shareholders`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/shareholders/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdShareholders200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
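# Shareholders sketch using the asynchronous form documented above: the call
# returns the request thread and get() yields the same list as the
# synchronous call would:
#
# >>> thread = api.get_corporations_corporation_id_shareholders(
# ...     98000001, async=True)
# >>> shareholders = thread.get()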
def get_corporations_corporation_id_standings(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation standings # noqa: E501
Return corporation standings from agents, NPC corporations, and factions --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_standings(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdStandings200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_standings_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_standings_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_standings_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation standings # noqa: E501
Return corporation standings from agents, NPC corporations, and factions --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_standings_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdStandings200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_standings" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_standings`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_standings`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/standings/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdStandings200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
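# Standings sketch; from_id and standing follow the generated
# GetCorporationsCorporationIdStandings200Ok model and are assumptions here:
#
# >>> for row in api.get_corporations_corporation_id_standings(98000001):
# ...     print(row.from_id, row.standing)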
def get_corporations_corporation_id_starbases(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation starbases (POSes) # noqa: E501
Returns a list of corporation starbases (POSes) --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_starbases(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdStarbases200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_starbases_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_starbases_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_starbases_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation starbases (POSes) # noqa: E501
Returns a list of corporation starbases (POSes) --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_starbases_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdStarbases200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_starbases" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_starbases`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_starbases`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/starbases/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdStarbases200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
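# Starbase listing sketch; passing `_request_timeout` shows one of the
# pass-through kwargs every generated method accepts:
#
# >>> bases = api.get_corporations_corporation_id_starbases(
# ...     98000001, _request_timeout=30)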
def get_corporations_corporation_id_starbases_starbase_id(self, corporation_id, starbase_id, system_id, **kwargs): # noqa: E501
"""Get starbase (POS) detail # noqa: E501
Returns various settings and fuels of a starbase (POS) --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_starbases_starbase_id(corporation_id, starbase_id, system_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param int starbase_id: An EVE starbase (POS) ID (required)
:param int system_id: The solar system this starbase (POS) is located in (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdStarbasesStarbaseIdOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_starbases_starbase_id_with_http_info(corporation_id, starbase_id, system_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_starbases_starbase_id_with_http_info(corporation_id, starbase_id, system_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_starbases_starbase_id_with_http_info(self, corporation_id, starbase_id, system_id, **kwargs): # noqa: E501
"""Get starbase (POS) detail # noqa: E501
Returns various settings and fuels of a starbase (POS) --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_starbases_starbase_id_with_http_info(corporation_id, starbase_id, system_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param int starbase_id: An EVE starbase (POS) ID (required)
:param int system_id: The solar system this starbase (POS) is located in (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdStarbasesStarbaseIdOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'starbase_id', 'system_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_starbases_starbase_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_starbases_starbase_id`") # noqa: E501
# verify the required parameter 'starbase_id' is set
if ('starbase_id' not in params or
params['starbase_id'] is None):
raise ValueError("Missing the required parameter `starbase_id` when calling `get_corporations_corporation_id_starbases_starbase_id`") # noqa: E501
# verify the required parameter 'system_id' is set
if ('system_id' not in params or
params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `get_corporations_corporation_id_starbases_starbase_id`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_starbases_starbase_id`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
if 'starbase_id' in params:
path_params['starbase_id'] = params['starbase_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'system_id' in params:
query_params.append(('system_id', params['system_id'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/starbases/{starbase_id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCorporationsCorporationIdStarbasesStarbaseIdOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
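# The detail route also needs the solar system ID, which the listing above
# supplies; a sketch combining the two (the attribute names starbase_id and
# system_id follow the generated model and are assumptions here):
#
# >>> for base in api.get_corporations_corporation_id_starbases(98000001):
# ...     detail = api.get_corporations_corporation_id_starbases_starbase_id(
# ...         98000001, base.starbase_id, base.system_id)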
def get_corporations_corporation_id_structures(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation structures # noqa: E501
Get a list of corporation structures. This route's version includes the changes to structures detailed in this blog: https://www.eveonline.com/article/upwell-2.0-structures-changes-coming-on-february-13th --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): StationManager # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_structures(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str language: Language to use in the response
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdStructures200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_structures_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_structures_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_structures_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation structures # noqa: E501
Get a list of corporation structures. This route's version includes the changes to structures detailed in this blog: https://www.eveonline.com/article/upwell-2.0-structures-changes-coming-on-february-13th --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): StationManager # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_structures_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str language: Language to use in the response
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdStructures200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'language', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_structures" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_structures`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_structures`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'language' in params:
query_params.append(('language', params['language'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/corporations/{corporation_id}/structures/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdStructures200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
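# Structures sketch with the optional `language` query parameter; 'en-us' is
# an assumed value for the accepted language codes:
#
# >>> structures = api.get_corporations_corporation_id_structures(
# ...     98000001, language='en-us')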
def get_corporations_corporation_id_titles(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation titles # noqa: E501
Returns a corporation's titles --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_titles(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdTitles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_titles_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_titles_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_titles_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation titles # noqa: E501
Returns a corporation's titles --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_titles_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdTitles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_titles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_titles`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_titles`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/titles/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdTitles200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
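# Titles sketch; like the other Director-gated routes above, this assumes the
# ApiClient is configured for the 'evesso' authentication setting:
#
# >>> titles = api.get_corporations_corporation_id_titles(98000001)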
def get_corporations_names(self, corporation_ids, **kwargs): # noqa: E501
"""Get corporation names # noqa: E501
Resolve a set of corporation IDs to corporation names --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_names(corporation_ids, async=True)
>>> result = thread.get()
:param async bool
:param list[int] corporation_ids: A comma separated list of corporation IDs (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsNames200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_names_with_http_info(corporation_ids, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_names_with_http_info(corporation_ids, **kwargs) # noqa: E501
return data
def get_corporations_names_with_http_info(self, corporation_ids, **kwargs): # noqa: E501
"""Get corporation names # noqa: E501
Resolve a set of corporation IDs to corporation names --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_names_with_http_info(corporation_ids, async=True)
>>> result = thread.get()
:param async bool
:param list[int] corporation_ids: A comma separated list of corporation IDs (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsNames200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_ids', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_names" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_ids' is set
if ('corporation_ids' not in params or
params['corporation_ids'] is None):
raise ValueError("Missing the required parameter `corporation_ids` when calling `get_corporations_names`") # noqa: E501
if ('corporation_ids' in params and
len(params['corporation_ids']) > 100):
raise ValueError("Invalid value for parameter `corporation_ids` when calling `get_corporations_names`, number of items must be less than or equal to `100`") # noqa: E501
if ('corporation_ids' in params and
len(params['corporation_ids']) < 1):
raise ValueError("Invalid value for parameter `corporation_ids` when calling `get_corporations_names`, number of items must be greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'corporation_ids' in params:
query_params.append(('corporation_ids', params['corporation_ids'])) # noqa: E501
collection_formats['corporation_ids'] = 'csv' # noqa: E501
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v2/corporations/names/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsNames200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
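# The names route is public (empty auth_settings) and sends the ID list as a
# csv-joined query parameter; attribute names follow the generated
# GetCorporationsNames200Ok model and are assumptions here:
#
# >>> for entry in api.get_corporations_names([98000001, 98000002]):
# ...     print(entry.corporation_id, entry.corporation_name)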
def get_corporations_npccorps(self, **kwargs): # noqa: E501
"""Get npc corporations # noqa: E501
Get a list of npc corporations --- This route expires daily at 11:05 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_npccorps(async=True)
>>> result = thread.get()
:param async bool
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_npccorps_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_corporations_npccorps_with_http_info(**kwargs) # noqa: E501
return data
def get_corporations_npccorps_with_http_info(self, **kwargs): # noqa: E501
"""Get npc corporations # noqa: E501
Get a list of npc corporations --- This route expires daily at 11:05 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_npccorps_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_npccorps" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/npccorps/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[int]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats) | swagger_client/api/corporation_api.py | from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class CorporationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_corporations_corporation_id(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation information # noqa: E501
Public information about a corporation --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation information # noqa: E501
Public information about a corporation --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v4/corporations/{corporation_id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCorporationsCorporationIdOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_alliancehistory(self, corporation_id, **kwargs): # noqa: E501
"""Get alliance history # noqa: E501
Get a list of all the alliances a corporation has been a member of --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_alliancehistory(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdAlliancehistory200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_alliancehistory_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_alliancehistory_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_alliancehistory_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get alliance history # noqa: E501
Get a list of all the alliances a corporation has been a member of --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_alliancehistory_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdAlliancehistory200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_alliancehistory" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_alliancehistory`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_alliancehistory`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v2/corporations/{corporation_id}/alliancehistory/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdAlliancehistory200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
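# Editor's sketch: calling the _with_http_info variant directly, without
# _return_http_data_only, typically yields the deserialized data plus HTTP
# status and headers in generated clients of this style -- treat the tuple
# shape as an assumption about the underlying api_client, not a guarantee.
#
#     >>> data, status, headers = api.get_corporations_corporation_id_alliancehistory_with_http_info(98000001)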
def get_corporations_corporation_id_blueprints(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation blueprints # noqa: E501
Returns a list of blueprints the corporation owns --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_blueprints(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdBlueprints200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_blueprints_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_blueprints_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_blueprints_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation blueprints # noqa: E501
Returns a list of blueprints the corporation owns --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_blueprints_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdBlueprints200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_blueprints" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_blueprints`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_blueprints`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/corporations/{corporation_id}/blueprints/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdBlueprints200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
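# Illustrative only: the blueprints route is SSO-authenticated ('evesso') and
# paginated; `page` and `token` are forwarded as query parameters as shown
# above. A hypothetical paging loop, assuming an empty page marks the end:
#
#     >>> page = 1
#     >>> while True:
#     ...     batch = api.get_corporations_corporation_id_blueprints(98000001, page=page, token='<access token>')
#     ...     if not batch:
#     ...         break
#     ...     page += 1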
def get_corporations_corporation_id_containers_logs(self, corporation_id, **kwargs): # noqa: E501
"""Get all corporation ALSC logs # noqa: E501
Returns logs recorded in the past seven days from all audit log secure containers (ALSC) owned by a given corporation --- This route is cached for up to 600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_containers_logs(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdContainersLogs200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_containers_logs_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_containers_logs_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_containers_logs_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get all corporation ALSC logs # noqa: E501
Returns logs recorded in the past seven days from all audit log secure containers (ALSC) owned by a given corporation --- This route is cached for up to 600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_containers_logs_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdContainersLogs200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_containers_logs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_containers_logs`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_containers_logs`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/corporations/{corporation_id}/containers/logs/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdContainersLogs200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
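# Editor's note, illustrative: the underscore-prefixed kwargs accepted above
# are forwarded straight to call_api; e.g. a per-request timeout in seconds
# (or a (connect, read) pair in urllib3-style clients -- an assumption about
# the transport, not something this file defines):
#
#     >>> logs = api.get_corporations_corporation_id_containers_logs(98000001, token='<access token>', _request_timeout=30)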
def get_corporations_corporation_id_divisions(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation divisions # noqa: E501
Return corporation hangar and wallet division names; a division is shown only if it is not using the default name --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_divisions(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdDivisionsOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_divisions_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_divisions_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_divisions_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation divisions # noqa: E501
Return corporation hangar and wallet division names; a division is shown only if it is not using the default name --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_divisions_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdDivisionsOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_divisions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_divisions`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_divisions`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/divisions/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCorporationsCorporationIdDivisionsOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
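# Sketch: the plain wrapper above pins kwargs['_return_http_data_only'] = True
# before delegating, so callers get back only the deserialized
# GetCorporationsCorporationIdDivisionsOk model, never the status/header extras:
#
#     >>> divisions = api.get_corporations_corporation_id_divisions(98000001, token='<access token>')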
def get_corporations_corporation_id_facilities(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation facilities # noqa: E501
Return a corporation's facilities --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Factory_Manager # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_facilities(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdFacilities200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_facilities_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_facilities_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_facilities_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation facilities # noqa: E501
Return a corporation's facilities --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Factory_Manager # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_facilities_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdFacilities200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_facilities" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_facilities`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_facilities`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/facilities/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdFacilities200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
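# Illustration: `datasource` travels as a query parameter (see query_params
# above); for EVE's ESI the usual value is a server name such as
# 'tranquility' -- stated here as background knowledge, not from this file:
#
#     >>> facilities = api.get_corporations_corporation_id_facilities(98000001, token='<access token>', datasource='tranquility')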
def get_corporations_corporation_id_icons(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation icon # noqa: E501
Get the icon URLs for a corporation --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_icons(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdIconsOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_icons_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_icons_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_icons_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation icon # noqa: E501
Get the icon URLs for a corporation --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_icons_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdIconsOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_icons" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_icons`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_icons`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/icons/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCorporationsCorporationIdIconsOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
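# Sketch: auth_settings is empty for the icons route, so no SSO token is
# needed; only the corporation ID is required:
#
#     >>> icons = api.get_corporations_corporation_id_icons(98000001)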
def get_corporations_corporation_id_medals(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation medals # noqa: E501
Returns a corporation's medals --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_medals(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMedals200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_medals_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_medals_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_medals_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation medals # noqa: E501
Returns a corporation's medals --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_medals_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMedals200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_medals" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_medals`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_medals`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/medals/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdMedals200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
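# Sketch of the validation behaviour implemented above: an unknown keyword
# raises TypeError, and corporation_id < 1 raises ValueError, both before any
# HTTP call is made:
#
#     >>> api.get_corporations_corporation_id_medals(0)             # ValueError
#     >>> api.get_corporations_corporation_id_medals(98000001, foo=1)  # TypeError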
def get_corporations_corporation_id_medals_issued(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation issued medals # noqa: E501
Returns medals issued by a corporation --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_medals_issued(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMedalsIssued200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_medals_issued_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_medals_issued_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_medals_issued_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation issued medals # noqa: E501
Returns medals issued by a corporation --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_medals_issued_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMedalsIssued200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_medals_issued" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_medals_issued`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_medals_issued`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/medals/issued/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdMedalsIssued200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
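# Illustration: x_user_agent is the only header-borne parameter here -- it is
# sent as 'X-User-Agent' -- while user_agent goes in the query string (see the
# param handling above). The value below is purely hypothetical:
#
#     >>> issued = api.get_corporations_corporation_id_medals_issued(98000001, token='<access token>', x_user_agent='my-app/1.0')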
def get_corporations_corporation_id_members(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation members # noqa: E501
Return the current member list of a corporation; the token's character needs to be a member of the corporation --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_members(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_members_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_members_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_members_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation members # noqa: E501
Return the current member list of a corporation; the token's character needs to be a member of the corporation --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_members_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_members" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_members`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_members`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v3/corporations/{corporation_id}/members/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[int]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
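# Sketch: this route deserializes to a plain list of character IDs
# (response_type='list[int]'), so the result iterates as integers:
#
#     >>> for character_id in api.get_corporations_corporation_id_members(98000001, token='<access token>'):
#     ...     print(character_id)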
def get_corporations_corporation_id_members_limit(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation member limit # noqa: E501
Return a corporation's member limit, not including the CEO --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_members_limit(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: int
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_members_limit_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_members_limit_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_members_limit_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation member limit # noqa: E501
Return a corporation's member limit, not including the CEO --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_members_limit_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: int
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_members_limit" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_members_limit`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_members_limit`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/members/limit/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='int', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
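# Sketch: the member limit comes back as a bare int (response_type='int'),
# assuming the api_client deserializes primitives as usual:
#
#     >>> limit = api.get_corporations_corporation_id_members_limit(98000001, token='<access token>')
#     >>> isinstance(limit, int)
#     True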
def get_corporations_corporation_id_members_titles(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation's members' titles # noqa: E501
Returns a corporation's members' titles --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_members_titles(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMembersTitles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_members_titles_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_members_titles_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_members_titles_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation's members' titles # noqa: E501
Returns a corporation's members' titles --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_members_titles_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMembersTitles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_members_titles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_members_titles`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_members_titles`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/members/titles/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdMembersTitles200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
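# Sketch: as in the other wrappers above, passing async=True short-circuits to
# the _with_http_info variant and returns the request thread instead of data:
#
#     >>> thread = api.get_corporations_corporation_id_members_titles(98000001, token='<access token>', async=True)
#     >>> titles = thread.get()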
def get_corporations_corporation_id_membertracking(self, corporation_id, **kwargs): # noqa: E501
"""Track corporation members # noqa: E501
Returns additional information about a corporation's members which helps track their activities --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_membertracking(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMembertracking200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_membertracking_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_membertracking_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_membertracking_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Track corporation members # noqa: E501
Returns additional information about a corporation's members which helps track their activities --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_membertracking_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute the request asynchronously
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdMembertracking200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_membertracking" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_membertracking`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_membertracking`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/membertracking/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdMembertracking200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
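# Editor's illustration: _preload_content defaults to True above; passing
# False is the usual way, in urllib3-based generated clients, to skip
# deserialization and receive the raw response object -- an assumption about
# the underlying api_client, not something this file defines:
#
#     >>> raw = api.get_corporations_corporation_id_membertracking(98000001, token='<access token>', _preload_content=False)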
def get_corporations_corporation_id_outposts(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation outposts # noqa: E501
Get a list of corporation outpost IDs. Note: this endpoint will be removed once outposts are migrated to Citadels, as discussed in this blog: https://community.eveonline.com/news/dev-blogs/the-next-steps-in-structure-transition/ --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_outposts(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_outposts_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_outposts_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_outposts_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation outposts # noqa: E501
Get a list of corporation outpost IDs Note: This endpoint will be removed once outposts are migrated to Citadels as talked about in this blog: https://community.eveonline.com/news/dev-blogs/the-next-steps-in-structure-transition/ --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_outposts_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_outposts" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_outposts`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_outposts`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/outposts/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[int]', # noqa: E501
auth_settings=auth_settings,
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats,
**{'async': params.get('async')})  # 'async' is a reserved word on Python 3.7+; pass it via dict unpacking
def get_corporations_corporation_id_outposts_outpost_id(self, corporation_id, outpost_id, **kwargs): # noqa: E501
"""Get corporation outpost details # noqa: E501
Get details about a given outpost. Note: This endpoint will be removed once outposts are migrated to Citadels as talked about in this blog: https://community.eveonline.com/news/dev-blogs/the-next-steps-in-structure-transition/ --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_outposts_outpost_id(corporation_id, outpost_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param int outpost_id: A station (outpost) ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdOutpostsOutpostIdOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_outposts_outpost_id_with_http_info(corporation_id, outpost_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_outposts_outpost_id_with_http_info(corporation_id, outpost_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_outposts_outpost_id_with_http_info(self, corporation_id, outpost_id, **kwargs): # noqa: E501
"""Get corporation outpost details # noqa: E501
Get details about a given outpost. Note: This endpoint will be removed once outposts are migrated to Citadels as talked about in this blog: https://community.eveonline.com/news/dev-blogs/the-next-steps-in-structure-transition/ --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_outposts_outpost_id_with_http_info(corporation_id, outpost_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param int outpost_id: A station (outpost) ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdOutpostsOutpostIdOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'outpost_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_outposts_outpost_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_outposts_outpost_id`") # noqa: E501
# verify the required parameter 'outpost_id' is set
if ('outpost_id' not in params or
params['outpost_id'] is None):
raise ValueError("Missing the required parameter `outpost_id` when calling `get_corporations_corporation_id_outposts_outpost_id`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_outposts_outpost_id`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
if 'outpost_id' in params:
path_params['outpost_id'] = params['outpost_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/outposts/{outpost_id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCorporationsCorporationIdOutpostsOutpostIdOk', # noqa: E501
auth_settings=auth_settings,
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats,
**{'async': params.get('async')})  # 'async' is a reserved word on Python 3.7+; pass it via dict unpacking
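# --- Usage sketch (illustrative) ---
# The two outpost endpoints compose naturally: fetch the ID list, then one
# detail record per ID (corporation/outpost IDs below are placeholders):
#
#     outpost_ids = api.get_corporations_corporation_id_outposts(98000001)
#     details = [
#         api.get_corporations_corporation_id_outposts_outpost_id(98000001, oid)
#         for oid in outpost_ids
#     ]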
def get_corporations_corporation_id_roles(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation member roles # noqa: E501
Return the roles of all members if the character has the personnel manager role or any grantable role. --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_roles(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdRoles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_roles_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_roles_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_roles_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation member roles # noqa: E501
Return the roles of all members if the character has the personnel manager role or any grantable role. --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_roles_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdRoles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_roles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_roles`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_roles`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/roles/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdRoles200Ok]', # noqa: E501
auth_settings=auth_settings,
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats,
**{'async': params.get('async')})  # 'async' is a reserved word on Python 3.7+; pass it via dict unpacking
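# --- Usage sketch (illustrative) ---
# A common pattern is indexing the roles list by character. This assumes the
# GetCorporationsCorporationIdRoles200Ok model exposes character_id and roles
# attributes, which is an assumption about the generated models:
#
#     roles_by_char = {
#         entry.character_id: entry.roles
#         for entry in api.get_corporations_corporation_id_roles(98000001)
#     }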
def get_corporations_corporation_id_roles_history(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation member roles history # noqa: E501
Return how roles have changed for a corporation's members, up to a month --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_roles_history(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdRolesHistory200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_roles_history_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_roles_history_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_roles_history_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation member roles history # noqa: E501
Return how roles have changed for a corporation's members, up to a month --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_roles_history_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdRolesHistory200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_roles_history" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_roles_history`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_roles_history`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/roles/history/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdRolesHistory200Ok]', # noqa: E501
auth_settings=auth_settings,
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats,
**{'async': params.get('async')})  # 'async' is a reserved word on Python 3.7+; pass it via dict unpacking
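# --- Usage sketch (illustrative) ---
# The history endpoint is paginated via the `page` query parameter; a simple
# loop-until-empty fetch could look like this (the empty-page termination
# condition is an assumption about the ESI pagination contract):
#
#     page, history = 1, []
#     while True:
#         batch = api.get_corporations_corporation_id_roles_history(
#             98000001, page=page)
#         if not batch:
#             break
#         history.extend(batch)
#         page += 1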
def get_corporations_corporation_id_shareholders(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation shareholders # noqa: E501
Return the current shareholders of a corporation. --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_shareholders(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdShareholders200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_shareholders_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_shareholders_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_shareholders_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation shareholders # noqa: E501
Return the current shareholders of a corporation. --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_shareholders_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdShareholders200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_shareholders" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_shareholders`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_shareholders`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/shareholders/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdShareholders200Ok]', # noqa: E501
auth_settings=auth_settings,
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats,
**{'async': params.get('async')})  # 'async' is a reserved word on Python 3.7+; pass it via dict unpacking
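# --- Usage sketch (illustrative) ---
# Because the plain method forces _return_http_data_only=True, callers who
# need response headers should use the *_with_http_info variant, which then
# returns (data, status, headers). ESI conventionally reports the total page
# count in an X-Pages header (treat the header name as an assumption):
#
#     data, status, headers = (
#         api.get_corporations_corporation_id_shareholders_with_http_info(
#             98000001, _return_http_data_only=False))
#     total_pages = int(headers.get('X-Pages', 1))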
def get_corporations_corporation_id_standings(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation standings # noqa: E501
Return corporation standings from agents, NPC corporations, and factions --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_standings(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdStandings200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_standings_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_standings_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_standings_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation standings # noqa: E501
Return corporation standings from agents, NPC corporations, and factions --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_standings_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdStandings200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_standings" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_standings`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_standings`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/standings/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdStandings200Ok]', # noqa: E501
auth_settings=auth_settings,
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats,
**{'async': params.get('async')})  # 'async' is a reserved word on Python 3.7+; pass it via dict unpacking
def get_corporations_corporation_id_starbases(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation starbases (POSes) # noqa: E501
Returns a list of corporation starbases (POSes) --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_starbases(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdStarbases200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_starbases_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_starbases_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_starbases_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation starbases (POSes) # noqa: E501
Returns a list of corporation starbases (POSes) --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_starbases_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdStarbases200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_starbases" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_starbases`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_starbases`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/starbases/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdStarbases200Ok]', # noqa: E501
auth_settings=auth_settings,
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats,
**{'async': params.get('async')})  # 'async' is a reserved word on Python 3.7+; pass it via dict unpacking
def get_corporations_corporation_id_starbases_starbase_id(self, corporation_id, starbase_id, system_id, **kwargs): # noqa: E501
"""Get starbase (POS) detail # noqa: E501
Returns various settings and fuels of a starbase (POS) --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_starbases_starbase_id(corporation_id, starbase_id, system_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param int starbase_id: An EVE starbase (POS) ID (required)
:param int system_id: The solar system this starbase (POS) is located in (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdStarbasesStarbaseIdOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_starbases_starbase_id_with_http_info(corporation_id, starbase_id, system_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_starbases_starbase_id_with_http_info(corporation_id, starbase_id, system_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_starbases_starbase_id_with_http_info(self, corporation_id, starbase_id, system_id, **kwargs): # noqa: E501
"""Get starbase (POS) detail # noqa: E501
Returns various settings and fuels of a starbase (POS) --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_starbases_starbase_id_with_http_info(corporation_id, starbase_id, system_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param int starbase_id: An EVE starbase (POS) ID (required)
:param int system_id: The solar system this starbase (POS) is located in (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdStarbasesStarbaseIdOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'starbase_id', 'system_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_starbases_starbase_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_starbases_starbase_id`") # noqa: E501
# verify the required parameter 'starbase_id' is set
if ('starbase_id' not in params or
params['starbase_id'] is None):
raise ValueError("Missing the required parameter `starbase_id` when calling `get_corporations_corporation_id_starbases_starbase_id`") # noqa: E501
# verify the required parameter 'system_id' is set
if ('system_id' not in params or
params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `get_corporations_corporation_id_starbases_starbase_id`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_starbases_starbase_id`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
if 'starbase_id' in params:
path_params['starbase_id'] = params['starbase_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'system_id' in params:
query_params.append(('system_id', params['system_id'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/starbases/{starbase_id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetCorporationsCorporationIdStarbasesStarbaseIdOk', # noqa: E501
auth_settings=auth_settings,
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats,
**{'async': params.get('async')})  # 'async' is a reserved word on Python 3.7+; pass it via dict unpacking
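# --- Usage sketch (illustrative) ---
# The starbase detail call needs the solar system as well as the starbase ID;
# the list endpoint supplies both, assuming its model exposes starbase_id and
# system_id attributes (an assumption about the generated models):
#
#     for sb in api.get_corporations_corporation_id_starbases(98000001):
#         detail = api.get_corporations_corporation_id_starbases_starbase_id(
#             98000001, sb.starbase_id, sb.system_id)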
def get_corporations_corporation_id_structures(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation structures # noqa: E501
Get a list of corporation structures. This route's version includes the changes to structures detailed in this blog: https://www.eveonline.com/article/upwell-2.0-structures-changes-coming-on-february-13th --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): StationManager # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_structures(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str language: Language to use in the response
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdStructures200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_structures_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_structures_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_structures_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation structures # noqa: E501
Get a list of corporation structures. This route's version includes the changes to structures detailed in this blog: https://www.eveonline.com/article/upwell-2.0-structures-changes-coming-on-february-13th --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): StationManager # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_structures_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str language: Language to use in the response
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdStructures200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'language', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_structures" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_structures`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_structures`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'language' in params:
query_params.append(('language', params['language'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/corporations/{corporation_id}/structures/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdStructures200Ok]', # noqa: E501
auth_settings=auth_settings,
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats,
**{'async': params.get('async')})  # 'async' is a reserved word on Python 3.7+; pass it via dict unpacking
def get_corporations_corporation_id_titles(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation titles # noqa: E501
Returns a corporation's titles --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_titles(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdTitles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_titles_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
data = self.get_corporations_corporation_id_titles_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_titles_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation titles # noqa: E501
Returns a corporation's titles --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_titles_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdTitles200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_titles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_titles`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_titles`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/titles/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdTitles200Ok]', # noqa: E501
auth_settings=auth_settings,
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats,
**{'async': params.get('async')})  # 'async' is a reserved word on Python 3.7+; pass it via dict unpacking
def get_corporations_names(self, corporation_ids, **kwargs): # noqa: E501
"""Get corporation names # noqa: E501
Resolve a set of corporation IDs to corporation names --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_names(corporation_ids, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param list[int] corporation_ids: A comma separated list of corporation IDs (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsNames200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_names_with_http_info(corporation_ids, **kwargs) # noqa: E501
else:
data = self.get_corporations_names_with_http_info(corporation_ids, **kwargs) # noqa: E501
return data
def get_corporations_names_with_http_info(self, corporation_ids, **kwargs): # noqa: E501
"""Get corporation names # noqa: E501
Resolve a set of corporation IDs to corporation names --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_names_with_http_info(corporation_ids, async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param list[int] corporation_ids: A comma separated list of corporation IDs (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsNames200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_ids', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_names" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_ids' is set
if ('corporation_ids' not in params or
params['corporation_ids'] is None):
raise ValueError("Missing the required parameter `corporation_ids` when calling `get_corporations_names`") # noqa: E501
if ('corporation_ids' in params and
len(params['corporation_ids']) > 100):
raise ValueError("Invalid value for parameter `corporation_ids` when calling `get_corporations_names`, number of items must be less than or equal to `100`") # noqa: E501
if ('corporation_ids' in params and
len(params['corporation_ids']) < 1):
raise ValueError("Invalid value for parameter `corporation_ids` when calling `get_corporations_names`, number of items must be greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'corporation_ids' in params:
query_params.append(('corporation_ids', params['corporation_ids'])) # noqa: E501
collection_formats['corporation_ids'] = 'csv' # noqa: E501
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v2/corporations/names/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsNames200Ok]', # noqa: E501
auth_settings=auth_settings,
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats,
**{'async': params.get('async')})  # 'async' is a reserved word on Python 3.7+; pass it via dict unpacking
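# --- Usage sketch (illustrative) ---
# The validation above rejects more than 100 IDs per call, so larger ID sets
# should be chunked client-side, e.g. with a small hypothetical helper:
#
#     def resolve_corp_names(api, ids, chunk=100):
#         out = []
#         for i in range(0, len(ids), chunk):
#             out.extend(api.get_corporations_names(ids[i:i + chunk]))
#         return out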
def get_corporations_npccorps(self, **kwargs): # noqa: E501
"""Get npc corporations # noqa: E501
Get a list of npc corporations --- This route expires daily at 11:05 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_npccorps(async=True)
>>> result = thread.get()
:param bool async: Execute request asynchronously and return the request thread
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_npccorps_with_http_info(**kwargs) # noqa: E501
else:
data = self.get_corporations_npccorps_with_http_info(**kwargs) # noqa: E501
return data
def get_corporations_npccorps_with_http_info(self, **kwargs): # noqa: E501
"""Get npc corporations # noqa: E501
Get a list of npc corporations --- This route expires daily at 11:05 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_npccorps_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_npccorps" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/npccorps/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[int]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats) | 0.715523 | 0.062046 |
from utils import *
import tkinter
import time
COLOUR_BACKGROUND = "#DAB887"
COLOUR_BLACK = "#251507"
COLOUR_WHITE = "#F7F0DF"
COLOUR_NORMAL = "#A96020"
COLOUR_MARKED = "#AF2020"
class Graphics:
# Constructor of the class
def __init__(self, Callback):
self.board = [0] * 51 # stores the board pieces
self.stone = [0] * 51 # stores the stone objects
self.Callback = Callback
self.window = tkinter.Tk(className = "Draughts")
self.window.resizable(width = False, height = False)
self.canvas = tkinter.Canvas(self.window, width = 500, height = 500, background = COLOUR_BACKGROUND)
self.canvas.bind("<Button-1>", self.EventHandler)
self.canvas.pack()
# Create stones & board pieces
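        # International (10x10) draughts uses the 50 dark squares, numbered
        # 1..50; index 0 of the lists above is unused padding.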
for i in range(1, 51):
self.board[i] = self.canvas.create_rectangle(IndexToBox(i), fill = COLOUR_NORMAL, width = 0)
self.stone[i] = self.canvas.create_oval(IndexToBox(i, 5))
self.SetStone(BLACK, *list(range(1, 21)))
self.SetStone(EMPTY, *list(range(21, 31)))
self.SetStone(WHITE, *list(range(31, 51)))
# Run the message loop
def Run(self):
self.window.mainloop()
# Set the display options of stones
def SetStone(self, st, *idxList):
for idx in idxList:
if st == EMPTY:
self.canvas.itemconfig(self.stone[idx], state = tkinter.HIDDEN)
else:
self.canvas.itemconfig(self.stone[idx], state = tkinter.NORMAL)
if HasFlag(st, BLACK):
self.canvas.itemconfig(self.stone[idx], fill = COLOUR_BLACK, outline = COLOUR_WHITE)
elif HasFlag(st, WHITE):
self.canvas.itemconfig(self.stone[idx], fill = COLOUR_WHITE, outline = COLOUR_BLACK)
if HasFlag(st, KING):
self.canvas.itemconfig(self.stone[idx], width = 3)
else:
self.canvas.itemconfig(self.stone[idx], width = 0)
# Set the display options of board
def SetBoard(self, marked, *idxList):
for idx in idxList:
if marked:
self.canvas.itemconfig(self.board[idx], fill = COLOUR_MARKED)
else:
self.canvas.itemconfig(self.board[idx], fill = COLOUR_NORMAL)
# Show the given move in animation.
# st: The state of the stone being moved
def Move(self, move, st):
prev = move[0]
self.SetBoard(False, prev)
for this in move[1: ]:
self.SetBoard(False, this)
self.SetStone(st, this)
d = GetDirection(prev, this)
while prev != this:
self.SetStone(EMPTY, prev)
prev = Adjacent(prev, d)
self.canvas.update_idletasks()
time.sleep(0.5)
            # Promote to king when reaching the opponent's back row.
            if (this <= 5 and HasFlag(st, WHITE)) or (this >= 46 and HasFlag(st, BLACK)):
self.SetStone(st|KING, this)
self.canvas.update_idletasks()
# Deal with mouse events and call back
def EventHandler(self, event):
        dispatcher = self.canvas.find_withtag(tkinter.CURRENT) # get the object that triggered the event
        if len(dispatcher) == 0: # the click did not hit any object
return
if dispatcher[0] in self.board: # clicked on the board
self.Callback(self.board.index(dispatcher[0]), BOARD)
else: # clicked at a stone
            self.Callback(self.stone.index(dispatcher[0]), STONE) | graphics.py | 0.159479 | 0.10942 |
class HighscoreEntry:
    # Constructor expecting the nickname and the points
    # achieved as input values
    def __init__(self, nickname, points):
        self.__nickname = nickname
        self.__points = points
    # Public method returning a string containing the
    # nickname and the points achieved
    def __str__(self):
        return self.__nickname + " - " + str(self.__points) + " Punkte"
# Class implementing a high-score table
class HighscoreTable:
    # Constructor of the class, performing the initialisation
    def __init__(self):
        # Create an empty high-score list with 10 places
        self.__entries = []
        for i in range(0, 10):
            self.__entries.append(HighscoreEntry("Name" + str(i), 0))
    # Public method for adding a new entry to the high-score list.
    # The nickname, the points achieved and the position within
    # the list are passed in.
    def addEntry(self, nickname, points, position):
        # Use the HighscoreEntry class
        entry = HighscoreEntry(nickname, points)
        # Walk through the old list up to the given position
        entriesTemp = []
        for i in range(0, position - 1):
            # Append the old element
            entriesTemp.append(self.__entries[i])
        # Now append the new element
        entriesTemp.append(entry)
        # Walk through the rest of the list
        for i in range(position - 1, len(self.__entries)):
            # Append the old element
            entriesTemp.append(self.__entries[i])
        # Make the temporary list the new list
        self.__entries = entriesTemp
    # Public method for printing the high-score list
    def printList(self):
        pos = 1
        for entry in self.__entries:
            print("Platz " + str(pos) + ": " + str(entry))
            pos += 1
# Entry point of the main program.
# Here the implemented classes are instantiated and used for
# demonstration and testing purposes.
hs = HighscoreTable()
hs.addEntry("Dieter", 666, 1)
hs.addEntry("Thomas", 12, 6)
hs.printList() | loesungen_in_python/09-referenzdatentypen/aufgabe_W_9_03_highscore/aufgabe_W_9_03_highscore.pyde | 0.358016 | 0.306838 |
# We'll work with heuristics:
# 1. Aggregate height (minimize)
# 2. Complete lines (maximize)
# 3. Holes (minimize)
# 4. Bumpiness (minimize)
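# A full player would combine these into a single placement score, typically a
# weighted sum (score = a*height + b*lines + c*holes + d*bumpiness) with tuned
# weights; only the individual measurements are implemented below.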
import curses
import debug
class AI:
def __init__(self, game):
# The AI has access to its game object, allowing it to directly call methods in order to move and rotate the block, etc.
self.game = game
self.window = game.windowObject
self.command = None
def setBlock(self, block):
# Set the block object.
# We don't actually do anything with this, except get the coordinates of the block,
# which are required for calculations.
self.blockObj = block
def setNextBlock(self, block):
# Set the next block
self.nextblock = block
def getCommand(self):
# Returns the command that the AI wants to perform.
# This is actually a curses.KEY_* value.
return self.command
def computeHeuristics(self):
# Encapsulation function for computing the various heuristics.
grid = self.window.grid
self.game.dropblock(False) # False indicates that the changes are temporary.
# Actual checks
lines = self.checklines(grid)
holes = self.checkholes(grid)
height = self.checkheight(grid)
bumpiness = self.checkbumpiness(grid)
# NOTE: Remember to undrop the block!
self.game.undrop()
if lines:
debug.debug("AI detected completed lines: %s" % lines)
if holes:
debug.debug("AI detected new holes: %s" % holes)
if bumpiness:
debug.debug("AI detected bumpiness level: %s" % bumpiness)
debug.debug("Height of grid: %s" % height)
def checklines(self, grid):
# Checks how many lines will be completed on the grid
linerange = self.window.endx - (self.window.startx + 1)
lines = 0
for y in grid:
if len(grid[y]) == linerange:
lines += 1
return lines
def checkholes(self, grid):
# We need to go through all the grid's positions.
# If a given y,x position is occupied, we check whether (y+1, x) is also occupied.
# If it is, that means we have a hole.
holes = 0
for y in grid:
xes = grid[y]
for xtuple in xes:
# This HAS to be an occupied position, as our grid is composed of:
# y: [(x, c), (x, c), (x, c), (x, c), ...] positions.
# Therefore if we can loop on it, it exists in the grid, and this y,x position is taken.
if y+1 in grid:
x = xtuple[0]
exes = self.window.extractxes(grid, y+1) # "Extracted xes"
if x not in exes:
# Hole detected.
holes += 1
# We divide by two because each "x unit" of a block is actually made up of 2 x positions.
# So the I block actually takes up 8 x spaces when it's vertical, not 4.
return int(holes / 2)
def formxlist(self, grid):
# Forms the "x list" so we can calculate height and bumpiness
xlist = []
temp = []
add = True
for y in grid:
if add:
# Since each column is made up of two x positions and one y position, we don't need to add both.
# We skip over one of them each time.
xes = self.window.extractxes(grid, y)
for x in xes:
temp.append(y)
xlist.append(temp)
temp = []
add = False
else:
add = True
return xlist
    def checkheight(self, grid):
        # We check the aggregate height of the grid.
        # In order to account for holes, we start the total with checkholes().
        total = self.checkholes(grid)
        xlist = self.formxlist(grid)
        # Add the height of every column on top of the hole count.
        return total + sum(len(x) for x in xlist)
    def checkbumpiness(self, grid):
        # Here we compute the absolute height difference between each pair of
        # adjacent columns.
        # We check holes as well in order to account for them.
        xlist = self.formxlist(grid)
        total = self.checkholes(grid)
        for i, x in enumerate(xlist[::2]):
            try:
                total += abs(len(x) - len(xlist[i + 1]))
            except IndexError:
                total += len(xlist[i])
        return total | src/ai.py | 0.635675 | 0.66072 |
import os
import logging
import torch
import torch.nn as nn
import baseline as bl
from baseline.utils import (
export,
Offsets,
write_json,
load_vectorizers,
find_model_basename,
)
from baseline.model import load_model_for
from baseline.vectorizers import (
GOVectorizer,
Dict1DVectorizer,
Char2DVectorizer,
Dict2DVectorizer,
Char1DVectorizer,
Token1DVectorizer,
)
from mead.utils import (
get_output_paths,
create_metadata,
save_to_bundle,
)
from mead.exporters import Exporter, register_exporter
from mead.pytorch.tagger_decoders import InferenceCRF, InferenceGreedyDecoder
__all__ = []
exporter = export(__all__)
logger = logging.getLogger('mead')
VECTORIZER_SHAPE_MAP = {
Token1DVectorizer: [1, 10],
GOVectorizer: [1, 10],
Dict1DVectorizer: [1, 10],
Char2DVectorizer: [1, 10, 5],
Dict2DVectorizer: [1, 10, 5],
Char1DVectorizer: [1, 10],
}
def create_fake_data(shapes, vectorizers, order, min_=0, max_=50,):
data = {
k: torch.randint(min_, max_, shapes[type(v)]) for k, v in vectorizers.items()
}
ordered_data = tuple(data[k] for k in order)
lengths = torch.LongTensor([data[list(data.keys())[0]].shape[1]])
return ordered_data, lengths
def monkey_patch_embeddings(model):
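    # torch.jit.trace only accepts positional tensor inputs, so we fix a
    # deterministic feature order and rebind `embed` to consume a tuple of
    # tensors instead of a dict keyed by feature name.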
order = tuple(k for k, _ in model.embeddings.items())
logger.debug("Using %s as the feature order", order)
model.ordered_embeddings = tuple(model.embeddings[k] for k in order)
def embed(self, x):
res = []
for i in range(len(x)):
res.append(self.ordered_embeddings[i](x[i]))
return torch.cat(res, dim=2)
model.embed = embed.__get__(model)
return order
class ExportingTagger(nn.Module):
def __init__(self, tagger):
super(ExportingTagger, self).__init__()
self.tagger = tagger
if hasattr(tagger, 'crf'):
logger.debug("Found CRF, replacing with torch script decoder.")
self.decoder = InferenceCRF(
self.tagger.crf.transitions.squeeze(0),
self.tagger.crf.start_idx,
self.tagger.crf.end_idx
)
else:
if tagger.constraint is None:
                # This just calls torch.max; it is normally done inline in the
                # tagger, but we wrap it in a class so we have a consistent forward.
self.decoder = InferenceGreedyDecoder()
else:
logger.debug("Found constraints for decoding, replacing with torch script decoder.")
self.decoder = InferenceCRF(
self.tagger.constraint.squeeze(0),
Offsets.GO,
Offsets.EOS
)
def forward(self, x, l):
trans_x = []
for i in range(len(x)):
trans_x.append(x[i].transpose(0, 1))
new_x = tuple(trans_x)
x = self.tagger.compute_unaries(new_x, l)
return self.decoder.decode(x, l)[0]
class ExportingClassifier(nn.Module):
def __init__(self, classifier):
super(ExportingClassifier, self).__init__()
self.classifier = classifier
def forward(self, x, l):
x = self.classifier.embed(x)
x = self.classifier.pool(x, l)
x = self.classifier.stacked(x)
return self.classifier.output(x)
@exporter
class PytorchExporter(Exporter):
def __init__(self, task, **kwargs):
super(PytorchExporter, self).__init__(task, **kwargs)
self.wrapper = None
def run(self, basename, output_dir, project=None, name=None, model_version=None, **kwargs):
logger.warning("Pytorch exporting is experimental and is not guaranteed to work for plugin models.")
client_output, server_output = get_output_paths(
output_dir,
project, name,
model_version,
kwargs.get('remote', True),
)
logger.info("Saving vectorizers and vocabs to %s", client_output)
logger.info("Saving serialized model to %s", server_output)
model, vectorizers, model_name = self.load_model(basename)
order = monkey_patch_embeddings(model)
data, lengths = create_fake_data(VECTORIZER_SHAPE_MAP, vectorizers, order)
meta = create_metadata(
order, ['output'],
self.sig_name,
model_name, model.lengths_key,
exporter_type=self.preproc_type()
)
exportable = self.wrapper(model)
logger.info("Tracing Model.")
traced = torch.jit.trace(exportable, (data, lengths))
traced.save(os.path.join(server_output, 'model.pt'))
logger.info("Saving metadata.")
save_to_bundle(client_output, basename, assets=meta)
logger.info('Successfully exported model to %s', output_dir)
def load_model(self, model_dir):
model_name = find_model_basename(model_dir)
vectorizers = load_vectorizers(model_dir)
model = load_model_for(self.task.task_name(), model_name, device='cpu')
model = model.cpu()
model.eval()
model_name = os.path.basename(model_name)
return model, vectorizers, model_name
@exporter
@register_exporter(task='classify', name='default')
class ClassifyPytorchExporter(PytorchExporter):
def __init__(self, task, **kwargs):
super(ClassifyPytorchExporter, self).__init__(task)
self.wrapper = ExportingClassifier
self.sig_name = 'predict_text'
@exporter
@register_exporter(task='tagger', name='default')
class TaggerPytorchExporter(PytorchExporter):
def __init__(self, task, **kwargs):
super(TaggerPytorchExporter, self).__init__(task)
self.wrapper = ExportingTagger
self.sig_name = 'tag_text'
@exporter
@register_exporter(task='seq2seq', name='default')
class Seq2SeqPytorchExporter(PytorchExporter):
def __init__(self, task, **kwargs):
        raise NotImplementedError | python/mead/pytorch/exporters.py | 0.78968 | 0.463566 |
import re
import base64
import asyncio
import logging
from typing import Optional, Tuple, Union
import requests
import wechaty
from wechaty import (
FileBox,
Wechaty,
Contact,
Room,
Message
)
from config import CQ_API_URL
from crud import get_qq_by_wx, add_user
from database import Base, engine, Session, SessionLocal
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class MemeBot(Wechaty):
def __init__(self):
self.login_user: Optional[Contact] = None
super().__init__()
    @staticmethod
    def check_qq(text: str) -> Tuple[bool, str]:
        # A QQ number is 5 to 11 digits and never starts with 0.
        pattern = re.compile("[1-9]\\d{4,10}")
        matches = pattern.findall(text)
        if len(matches) == 1:
            return True, matches[0]
        return False, ''
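    # e.g. check_qq("my qq is 12345678") -> (True, "12345678")
    #      check_qq("12345 or 67890")    -> (False, "") because it is ambiguous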
async def on_message(self, msg: Message):
from_contact: Contact = msg.talker()
text: str = msg.text()
room: Optional[Room] = msg.room()
file_box = None
if msg.type() in wechaty.user.message.SUPPORTED_MESSAGE_FILE_TYPES:
file_box: Optional[FileBox] = await msg.to_file_box()
db: Session = SessionLocal()
conversation: Union[Room, Contact] = from_contact if room is None else room
await conversation.ready()
qq = get_qq_by_wx(db=db, wx_id=conversation.contact_id)
        is_qq, checked_qq = self.check_qq(text)
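        # An image from an already-bound user is forwarded to their QQ account
        # through the CQ HTTP API (e.g. go-cqhttp).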
if file_box and qq:
            str_img = base64.b64encode(requests.get(file_box.remoteUrl).content).decode("utf-8")
requests.post(
url=f'{CQ_API_URL}/send_msg',
json={
'user_id': qq,
'message': f'[CQ:image,file=base64://{str_img}, type=show,id=40000]'
}
)
elif is_qq:
add_user(db=db, wx_id=conversation.contact_id, qq=checked_qq)
await conversation.say('绑定成功!')
elif not qq:
await conversation.say('请输入QQ号以完成绑定!')
db.close()
async def on_login(self, contact: Contact):
        logger.info('Contact<%s> has logged in ...', contact)
self.login_user = contact
bot: Optional[MemeBot] = None
async def main():
Base.metadata.create_all(bind=engine)
global bot
bot = MemeBot()
await bot.start()
asyncio.run(main()) | src/main.py | 0.640861 | 0.063978 |
import boto3
import logging
from flask import current_app, abort, jsonify
from zappa.asynchronous import task
from reports.tasks.validation import validate_state
from reports.reporting import release_summary
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def publish(task_id, release_id):
""" Set state to 'publishing' and call the publishing task """
endpoint_url = current_app.config['DYNAMO_ENDPOINT']
db = boto3.resource('dynamodb', endpoint_url=endpoint_url)
table = db.Table(current_app.config['TASK_TABLE'])
validate_state(task_id, 'begin publishing')
    # Update the task state to 'publishing' in the db
task = table.update_item(
Key={'task_id': task_id},
UpdateExpression='SET #st = :new',
ExpressionAttributeNames={'#st': 'state'},
ExpressionAttributeValues={':new': 'publishing'},
ReturnValues='ALL_NEW'
)
    # The task should exist, given it was returned earlier; this must be an update issue.
if 'Attributes' not in task or len(task['Attributes']) == 0:
return abort(500, f"problem updating '{task_id}'")
# Invoke the publish task
do_publish(task_id, release_id)
return jsonify(task['Attributes']), 200
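# zappa's @task runs the decorated function asynchronously (dispatched through
# Lambda/SNS) when deployed; outside a Lambda environment it simply runs
# synchronously.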
@task
def do_publish(task_id, release_id):
"""
All work is done in the 'running' state, so just update to 'published'
here and report to coordinator.
"""
if not current_app:
from manage import app
with app.app_context():
publish_in_context(task_id, release_id)
else:
publish_in_context(task_id, release_id)
def publish_in_context(task_id, release_id):
endpoint_url = current_app.config['DYNAMO_ENDPOINT']
db = boto3.resource('dynamodb', endpoint_url=endpoint_url)
table = db.Table(current_app.config['TASK_TABLE'])
validate_state(task_id, 'publish')
    # Update the task state to 'published' in the db
task = table.update_item(
Key={'task_id': task_id},
UpdateExpression='SET #st = :new',
ExpressionAttributeNames={'#st': 'state'},
ExpressionAttributeValues={':new': 'published'},
ReturnValues='ALL_NEW'
)
    # The task should exist, given it was returned earlier; this must be an update issue.
if 'Attributes' not in task or len(task['Attributes']) == 0:
logger.error(f"problem updating task '{task_id}'")
return abort(500, f"problem updating '{task_id}'")
# Finally, update the summary rows with new state and version
release_summary.publish(release_id)
    return jsonify(task['Attributes']), 200 | reports/tasks/publish.py | 0.427875 | 0.06951 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message_id', models.CharField(db_index=True, max_length=4096)),
('in_reply_to', models.CharField(blank=True, db_index=True, max_length=4096)),
('date', models.DateTimeField(db_index=True)),
('subject', models.CharField(max_length=4096)),
('stripped_subject', models.CharField(db_index=True, max_length=4096)),
('version', models.PositiveSmallIntegerField(default=0)),
('sender', models.CharField(db_index=True, max_length=4096)),
('receivers', models.TextField()),
('prefixes', models.TextField(blank=True)),
('is_series_head', models.BooleanField()),
('is_complete', models.BooleanField(default=False)),
('is_patch', models.BooleanField()),
('patch_num', models.PositiveSmallIntegerField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='MessageProperty',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('value', models.TextField(blank=True)),
('message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Message')),
],
options={
'verbose_name_plural': 'Message Properties',
},
),
migrations.CreateModel(
name='Module',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
('enabled', models.BooleanField(default=True)),
('config', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='ModuleAsset',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=256, unique=True)),
('text', models.TextField(blank=True)),
('file', models.ImageField(blank=True, upload_to='module-asset')),
('module', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='api.Module')),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=1024)),
('mailing_list', models.CharField(blank=True, max_length=4096)),
('url', models.CharField(blank=True, max_length=4096)),
('git', models.CharField(blank=True, max_length=4096)),
('description', models.TextField(blank=True)),
('logo', models.ImageField(blank=True, upload_to='logo')),
],
),
migrations.AddField(
model_name='message',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Project'),
),
migrations.AlterUniqueTogether(
name='messageproperty',
unique_together=set([('message', 'name')]),
),
migrations.AlterUniqueTogether(
name='message',
unique_together=set([('project', 'message_id')]),
),
    ] | api/migrations/0001_initial.py | 0.63409 | 0.179459 |
from __future__ import absolute_import, unicode_literals
from django.db import models
from django.db.models import Lookup, Q, Value
from django.db.models.fields import Field
from django.db.models.functions import Concat
from mptt.fields import TreeForeignKey
from mptt.models import MPTTModel
from treebeard.mp_tree import MP_Node
from treebeard.ns_tree import NS_Node
from relativity.fields import L, Relationship
from relativity.mptt import MPTTDescendants, MPTTSubtree
from relativity.treebeard import MP_Descendants, NS_Descendants, MP_Subtree, NS_Subtree
class LinkedNode(models.Model):
name = models.CharField(max_length=30)
prev_id = models.IntegerField(null=True)
next = Relationship(
"self",
predicate=Q(prev_id=L("id")),
reverse_multiple=False,
multiple=False,
related_name="prev",
)
@Field.register_lookup
class NotEqual(Lookup):
lookup_name = "ne"
def as_sql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = lhs_params + rhs_params
return "%s <> %s" % (lhs, rhs), params
class BasePage(models.Model):
name = models.TextField()
slug = models.CharField(unique=True, null=False, blank=False, max_length=255)
class Meta:
abstract = True
def __str__(self):
return self.name
class MPTTPage(MPTTModel, BasePage):
parent = TreeForeignKey(
"self", on_delete=models.CASCADE, null=True, blank=True, related_name="children"
)
descendants = MPTTDescendants()
subtree = MPTTSubtree()
class TBMPPage(MP_Node, BasePage):
descendants = MP_Descendants()
subtree = MP_Subtree()
class TBNSPage(NS_Node, BasePage):
descendants = NS_Descendants()
subtree = NS_Subtree()
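# PageBase encodes the tree purely in the slug: a page's descendants are all
# pages whose slug starts with (but does not equal) its own, e.g. "about/"
# is an ascendant of "about/team/".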
class PageBase(BasePage):
descendants = Relationship(
"self",
Q(slug__startswith=L("slug"), slug__ne=L("slug")),
related_name="ascendants",
)
subtree = Relationship(
"self", Q(slug__startswith=L("slug")), related_name="rootpath"
)
class Meta:
abstract = True
def __str__(self):
return self.name
class Page(PageBase):
pass
class Categorised(models.Model):
category_codes = models.TextField()
class CategoryBase(models.Model):
code = models.CharField(unique=True, max_length=255)
members = Relationship(
Categorised,
Q(category_codes__contains=L("code")),
related_name="categories",
)
class Meta:
abstract = True
def __str__(self):
return "Category #%d: %s" % (self.pk, self.code)
class Category(CategoryBase):
pass
class Product(models.Model):
sku = models.CharField(max_length=13)
colour = models.CharField(max_length=20)
shape = models.CharField(max_length=20)
size = models.IntegerField()
deleted = models.BooleanField(default=False)
def __str__(self):
return "Product #%s: a %s %s, size %s" % (
self.sku,
self.colour,
self.shape,
self.size,
)
class CartItem(models.Model):
product_code = models.CharField(max_length=13)
description = models.TextField()
product = Relationship(
Product,
Q(deleted=False, sku=L("product_code")),
related_name="cart_items",
multiple=False,
null=False,
)
def __str__(self):
return "Cart item #%s: product code %s" % (self.pk, self.product_code)
class ProductFilter(models.Model):
fcolour = models.CharField(max_length=20)
fsize = models.IntegerField()
products = Relationship(
Product, Q(colour=L("fcolour"), size__gte=L("fsize")), related_name="filters"
)
cartitems = Relationship(
CartItem,
Q(product__colour=L("fcolour"), product__size__gte=L("fsize")),
related_name="filters",
)
def __str__(self):
return "ProductFilter #%d: %s size %s" % (self.pk, self.fcolour, self.fsize)
class User(models.Model):
username = models.CharField(primary_key=True, max_length=255)
def __str__(self):
return self.username
class Chemical(models.Model):
formula = models.TextField()
chemical_name = models.TextField()
common_name = models.TextField(blank=True)
def __str__(self):
return self.formula
class SavedFilter(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
search_regex = models.TextField()
chemicals = Relationship(Chemical, Q(formula__regex=L("search_regex")))
class UserGenerator(models.Model):
user = Relationship(
User,
Q(username=Concat(Value("generated_for_"), L("id"))),
multiple=False,
reverse_multiple=False,
)
def save(self, *args, **kwargs):
super(UserGenerator, self).save(*args, **kwargs)
        User.objects.create(username="generated_for_%d" % self.id) | tests/models.py | 0.698432 | 0.138666 |
import decimal
from django.contrib.auth.models import User
from django.core.cache import caches
from djsettings import djsetting, DjSettingsGroup, values
from djsettings.models import DjSetting
from djsettings.exceptions import InvalidSettingValue
from .base import BaseTestCase
class BaseTestValue:
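    # Mixin (deliberately not a TestCase itself, so it is never collected on
    # its own); concrete subclasses provide self.default, self.attr_name,
    # self.new_value and optionally self.invalid_value in setUp().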
def test_default_setting(self):
self.assertEqual(len(DjSetting.objects.filter(name=self.attr_name)), 0)
self.assertEqual(getattr(djsetting, self.attr_name), self.default)
db_obj = DjSetting.objects.get(name=self.attr_name)
self.assertEqual(db_obj.value, self.default)
dj_setting = djsetting.get_setting(self.attr_name)
self.assertEqual(db_obj.raw_value, caches['default'].get(dj_setting._get_cache_key(self.attr_name)))
def test_set_setting(self):
setattr(djsetting, self.attr_name, self.new_value)
db_obj = DjSetting.objects.get(name=self.attr_name)
self.assertEqual(getattr(djsetting, self.attr_name), self.new_value)
self.assertEqual(db_obj.value, self.new_value)
dj_setting = djsetting.get_setting(self.attr_name)
self.assertEqual(db_obj.raw_value, caches['default'].get(dj_setting._get_cache_key(self.attr_name)))
def test_invalid_value(self):
if hasattr(self, 'invalid_value'):
with self.assertRaises(InvalidSettingValue):
setattr(djsetting, self.attr_name, self.invalid_value)
class TestStringValue(BaseTestCase, BaseTestValue):
def setUp(self):
super(TestStringValue, self).setUp()
self.default = 'test string'
self.attr_name = 'test_string'
self.new_value = 'testing string updated'
self.invalid_value = 'test'
@djsetting.register
class TestSetting(DjSettingsGroup):
test_string = values.StringValue(default=self.default, min_length=5)
class TestBooleanValue(BaseTestCase, BaseTestValue):
def setUp(self):
super(TestBooleanValue, self).setUp()
self.default = True
self.attr_name = 'test_boolean'
self.new_value = False
@djsetting.register
class TestSetting(DjSettingsGroup):
test_boolean = values.BooleanValue(default=self.default)
class TestIntegerValue(BaseTestCase, BaseTestValue):
def setUp(self):
super(TestIntegerValue, self).setUp()
self.default = 1
self.attr_name = 'test_integer'
self.new_value = 2
self.invalid_value = 'test'
@djsetting.register
class TestSetting(DjSettingsGroup):
test_integer = values.IntegerValue(default=self.default)
class TestFloatValue(BaseTestCase, BaseTestValue):
def setUp(self):
super(TestFloatValue, self).setUp()
self.default = 1.01
self.attr_name = 'test_float'
self.new_value = 1.05
self.invalid_value = 'test'
@djsetting.register
class TestSetting(DjSettingsGroup):
test_float = values.FloatValue(default=self.default)
class TestDecimalValue(BaseTestCase, BaseTestValue):
def setUp(self):
super(TestDecimalValue, self).setUp()
self.default = decimal.Decimal(1.01)
self.attr_name = 'test_decimal'
self.new_value = decimal.Decimal(1.05)
self.invalid_value = 'test'
@djsetting.register
class TestSetting(DjSettingsGroup):
test_decimal = values.DecimalValue(default=self.default)
class TestModelChoiceValue(BaseTestCase, BaseTestValue):
def setUp(self):
super(TestModelChoiceValue, self).setUp()
self.default = User.objects.create_user('test')
self.attr_name = 'test_model_choice'
self.new_value = User.objects.create_user('test2')
self.invalid_value = 'test'
@djsetting.register
class TestSetting(DjSettingsGroup):
test_model_choice = values.ModelChoiceValue(queryset=User.objects.all(), default=self.default)
def test_obj_deleted(self):
self.assertEqual(getattr(djsetting, self.attr_name), self.default)
setattr(djsetting, self.attr_name, self.new_value)
self.assertEqual(getattr(djsetting, self.attr_name), self.new_value)
self.new_value.delete()
self.assertEqual(getattr(djsetting, self.attr_name), None)
def test_default_obj_deleted(self):
self.default.delete()
        self.assertEqual(getattr(djsetting, self.attr_name), None) | tests/test_values.py | 0.490724 | 0.240763
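A hedged sketch of how the BaseTestValue mixin pattern above extends to another value type. `values.DurationValue` is an assumption for illustration; it does not appear in this file.

import datetime

class TestDurationValue(BaseTestCase, BaseTestValue):
    def setUp(self):
        super(TestDurationValue, self).setUp()
        self.default = datetime.timedelta(hours=1)
        self.attr_name = 'test_duration'
        self.new_value = datetime.timedelta(minutes=90)
        self.invalid_value = 'test'

        @djsetting.register
        class TestSetting(DjSettingsGroup):
            # hypothetical value type, mirroring the registrations above
            test_duration = values.DurationValue(default=self.default)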
import click
import nisyscfg
from nisyscfg.errors import LibraryError
class DeviceNotFoundError(Exception):
pass
@click.group()
def nisyscfgcli():
"""Manipulate hardware resources detected by NI System Configuration API
========================================================================"""
@nisyscfgcli.command(name="list")
@click.option(
"-v", is_flag=True, help=": displays detailed information about all aliases"
)
@click.option("-r",
default="",
help=": specify IP address or hostname of remote target"
)
def list_command(v, r):
""": displays all NI aliases"""
with nisyscfg.Session(r) as nisyscfg_session:
_list_aliases(v, r, nisyscfg_session)
@click.command(name="rename")
@click.argument("old_alias")
@click.argument("new_alias")
@click.option("-r", help=": specify IP address or hostname of remote target", default="")
def rename_command(old_alias, new_alias, r):
""": change the alias of an item <old name> <new name>"""
with nisyscfg.Session(r) as nisyscfg_session:
if not _valid_alias(old_alias, nisyscfg_session):
return
if _valid_alias(new_alias, nisyscfg_session, rename_flag=True):
print(f"The name '{new_alias}' is already in use. Try a different name.")
return
_rename_hardware(old_alias, new_alias, nisyscfg_session)
@click.command(name="delete")
@click.argument("alias_name")
@click.option("-y", is_flag=True, help=": Bypasses the '[y/n]?' check")
@click.option("-r", help=": specify IP address or hostname of remote target", default="")
def delete_command(alias_name, y, r):
""": delete the specified item"""
with nisyscfg.Session(r) as nisyscfg_session:
if not _valid_alias(alias_name, nisyscfg_session):
return
delete_helper(y, alias_name, nisyscfg_session)
@click.command(name="info")
@click.argument("alias_name")
@click.option("-r", help=": specify IP address or hostname of remote target", default="")
def info_command(alias_name, r):
""": provides detailed info about the specified alias"""
with nisyscfg.Session(r) as nisyscfg_session:
if not _valid_alias(alias_name, nisyscfg_session):
return
_info_alias(alias_name, nisyscfg_session)
@click.command(name="self_test")
@click.argument("alias_name")
@click.option("-r", help=": specify IP address or hostname of remote target", default="")
def self_test_command(alias_name, r):
""": verifies alias is able to perform basic I/O functions"""
with nisyscfg.Session(r) as nisyscfg_session:
if not _valid_alias(alias_name, nisyscfg_session):
return
_self_test_alias(alias_name, nisyscfg_session)
@click.command(name="upgrade_firmware")
@click.argument("alias_name")
@click.option("-v", help=": update firmware to specified version")
@click.option("-r", help=": specify IP address or hostname of remote target", default="")
def upgrade_firmware_command(alias_name, v, r):
""": upgrades alias firmware to latest version"""
with nisyscfg.Session(r) as nisyscfg_session:
if not _valid_alias(alias_name, nisyscfg_session):
return
_upgrade_alias_firmware(alias_name, v, nisyscfg_session)
# ======================================================================================================================
def _list_aliases(verbose, r, session):
print(f"Scanning {r if r else 'localhost'} for devices...\n")
alias_filter = session.create_filter()
alias_filter.is_ni_product = True
alias_filter.is_device = True
for alias in session.find_hardware(alias_filter):
_print_hardware_info(alias, verbose)
def _rename_hardware(old_name, new_name, session):
hardware_to_rename = _get_hardware(session, old_name)
hardware_to_rename.rename(new_name)
print(f"Rename successful! '{old_name}' renamed to '{new_name}'")
def _del_hardware(alias, session):
hardware_to_delete = _get_hardware(session, alias)
hardware_to_delete.delete()
def delete_helper(flag, alias, session):
if flag:
_del_hardware(alias, session)
print(f"Item '{alias}' deleted.")
return
    while True:
        print(f"Are you sure you want to delete item {alias} [y/n]?")
        response = input().lower()
        if response in ("y", "yes"):
            _del_hardware(alias, session)
            print(f"Item {alias} deleted")
            break
        if response in ("n", "no"):
            print("Delete aborted")
            break
        # any other answer: ask again
def _info_alias(alias, session):
hardware_to_get_info = _get_hardware(session, alias)
_print_hardware_info(hardware_to_get_info, verbose=True)
def _self_test_alias(alias, session):
hardware_to_test = _get_hardware(session, alias)
try:
hardware_to_test.self_test()
print("Self test completed successfully!")
except LibraryError as err:
print("Self test failed")
print("Errors: ", err)
return
def _upgrade_alias_firmware(alias, version, session):
hardware_to_upgrade = _get_hardware(session, alias)
try:
hardware_to_upgrade.upgrade_firmware(str(version) if version else "0")
print("Firmware upgraded to latest version")
except LibraryError as err:
if err.code == nisyscfg.errors.Status.RESOURCE_IS_SIMULATED:
print("Firmware upgrades not available for simulated aliases")
return
else:
print("Firmware Upgrade Failed")
print("Error: ", err)
return
def _get_hardware(session, alias):
hardware_filter = session.create_filter()
hardware_filter.user_alias = alias
hardware_found = next(iter(session.find_hardware(hardware_filter)))
return hardware_found
def _valid_alias(alias, session, rename_flag=False):
available_aliases = _available_aliases(session)
if alias in available_aliases:
return True
if rename_flag:
return False
    print(
        f"No items with matching alias '{alias}'. Please retry with a valid alias. "
        f"Use 'nisyscfgcli list' to see all available aliases.\n"
    )
return False
def _available_aliases(session):
alias_filter = session.create_filter()
alias_filter.is_ni_product = True
alias_filter.is_device = True
aliases = []
for alias in session.find_hardware(alias_filter):
aliases.append(alias.expert_user_alias[0])
return aliases
def _print_hardware_info(alias, verbose):
if verbose:
attributes = {"Product Name": alias.get_property("product_name", default=""),
"IP Address": alias.get_property("tcp_ip_address", default=""),
"Slot": alias.get_property("slot_number", default=""),
"Serial Number": alias.get_property("serial_number", default=""),
"Vendor ID": alias.get_property("vendor_id", default=""),
"Product ID": alias.get_property("product_id", default=""),
"Firmware Revision": alias.get_property("firmware_revision", default=""),
"Current Temp": alias.get_property("current_temp", default=""),
"Number of Slots": alias.get_property("number_of_slots", default=""),
}
_print_alias_name(alias, verbose)
for label, attribute in attributes.items():
_print_tag(label, attribute)
print()
else:
_print_alias_name(alias, verbose)
def _print_tag(label, attribute):
if attribute:
print(f"--{label+':':<30}{attribute}")
def _print_alias_name(alias, verbose):
    try:
        print(alias.expert_user_alias[0])
    except DeviceNotFoundError:
        # Device vanished between enumeration and printing; nothing to report.
        pass
if __name__ == '__main__':
nisyscfgcli.add_command(list_command)
nisyscfgcli.add_command(rename_command)
nisyscfgcli.add_command(delete_command)
nisyscfgcli.add_command(info_command)
nisyscfgcli.add_command(self_test_command)
nisyscfgcli.add_command(upgrade_firmware_command)
    nisyscfgcli() | nisyscfgcli.py | 0.433022 | 0.123524
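A minimal smoke test for the CLI above, assuming the NI System Configuration runtime is installed (or devices are simulated). `CliRunner` is click's built-in test harness, and the `list` subcommand is the one registered on the group at import time.

from click.testing import CliRunner

def test_list_smoke():
    runner = CliRunner()
    result = runner.invoke(nisyscfgcli, ["list"])
    # exit code 0 means the session opened and device enumeration ran
    assert result.exit_code == 0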
from load import ROOT as R
import numpy as N
import gna.constructors as C
from gna.bundle import *
class integral_2d1d_v01(TransformationBundleLegacy):
def __init__(self, *args, **kwargs):
TransformationBundleLegacy.__init__(self, *args, **kwargs)
self.check_cfg()
def check_cfg(self):
        if 'name' not in self.cfg:
            pkey = self.cfg.parent_key()
            if not pkey:
                raise Exception('"name" option is not provided for integral_2d1d_v01')
            self.cfg.name = pkey
self.idx = self.cfg.indices
from gna.expression import NIndex
if not isinstance(self.idx, NIndex):
self.idx = NIndex(fromlist=self.cfg.indices)
        try:
            self.edges = N.ascontiguousarray(self.cfg.edges, dtype='d')
        except Exception:
            raise Exception('Invalid binning definition: {!r}'.format(self.cfg.edges))
        try:
            self.xorders = N.ascontiguousarray(self.cfg.xorders, dtype='P')
        except Exception:
            raise Exception('Invalid xorders definition: {!r}'.format(self.cfg.xorders))
        if len(self.cfg.variables) != 2:
            raise Exception('Two variables should be provided')
def build(self):
if self.xorders.size>1:
            if self.xorders.size + 1 != self.edges.size:
                raise Exception('Incompatible edges and xorders definition:\n    {!r}\n    {!r}'.format(self.edges, self.xorders))
self.integrator = R.GaussLegendre2d(self.edges, self.xorders, self.edges.size-1, -1.0, 1.0, self.cfg.yorder)
else:
self.integrator = R.GaussLegendre2d(self.edges, int(self.xorders[0]), self.edges.size-1, -1.0, 1.0, self.cfg.yorder)
self.integrator.points.setLabel('Gauss-Legendre 2d')
self.integrator.points.x.setLabel(self.cfg.variables[0])
self.integrator.points.xedges.setLabel('%s edges'%self.cfg.variables[0])
self.integrator.points.y.setLabel(self.cfg.variables[1])
self.set_output(self.integrator.points.x, self.cfg.variables[0])
self.set_output(self.integrator.points.xedges, '%s_edges'%self.cfg.variables[0])
self.set_output(self.integrator.points.xhist, '%s_hist'%self.cfg.variables[0])
self.set_output(self.integrator.points.y, self.cfg.variables[1])
for i, it in enumerate(self.idx.iterate()):
hist = R.GaussLegendre2dHist(self.integrator)
hist.hist.setLabel(it.current_format(name='hist'))
self.set_input( hist.hist.f, self.cfg.name, it, clone=0)
self.set_output(hist.hist.hist, self.cfg.name, it)
def define_variables(self):
        pass | packages/legacy/bundles/integral_2d1d_v01.py | 0.391988 | 0.113973
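A sketch of the configuration fields the bundle reads. The real cfg is a GNA bundle configuration object (it supports `'name' in cfg`, attribute access and parent_key()); a plain namespace stands in here and every value is illustrative only.

from types import SimpleNamespace

cfg = SimpleNamespace(
    name='kinint2',              # name used for the bundle inputs/outputs (assumed)
    indices=[],                  # NIndex definition; empty means a single instance
    edges=[0.0, 1.0, 2.0, 4.0],  # bin edges along the first variable
    xorders=[4],                 # single entry: same Gauss-Legendre order in every x bin
    yorder=3,                    # integration order along the second variable
    variables=('x', 'y'),        # exactly two variable names are required
)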
# Big up for @ochsff for allowing me to rip off part of his code.
import os
import re
import sys
import time
import shutil
import argparse
def color(text, color_code):
if sys.platform == "win32":
return text
return chr(0x1b) + "[" + str(color_code) + "m" + text + chr(0x1b) + "[0m"
def red(text):
return color(text, 31)
def green(text):
return color(text, 32)
def yellow(text):
return color(text, 33)
def bold(text):
return color(text, 1)
try:
import yaml
from markdown import Markdown
from markdown.preprocessors import Preprocessor
from jinja2.loaders import FileSystemLoader
from jinja2.environment import Environment
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name, TextLexer
except ImportError as e:
print(red("Unable to import dependency:") + str(e))
sys.exit(-1)
class CodeBlockPreprocessor(Preprocessor):
"""This converts code blocks into highlighted code.
Neat stuff."""
# Compile regexp, matching [code][/code] blocks.
pattern = re.compile(r"\[code:(.+?)\](.+?)\[/code\]", re.S)
# Add style formatter.
formatter = HtmlFormatter(noclasses=True, cssclass="colorful")
def run(self, lines):
def repl(m):
try:
lexer = get_lexer_by_name(m.group(1))
except ValueError:
lexer = TextLexer()
code = highlight(m.group(2), lexer, self.formatter)
return "\n{0}\n".format(code)
return [self.pattern.sub(repl, "\n".join(lines))]
def generate_static(destination):
"""Copy over static files to the destination.
Generally includes JavaScript, CSS, pictures, etc.
"""
# Check if there is a local static folder to copy over to destination.
if not os.path.exists("static"):
print("Unable to find local static folder... " + red("abort"))
return
print("Installing static folder to destination..."),
# Loop through every entry in the local static folder.
for entry in os.listdir("static"):
orig = os.path.join("static", entry)
dest = os.path.join(destination, entry)
# If the destination folder/file already exists, we delete it and
# rewrite everything again.
if os.path.exists(dest):
if os.path.isfile(dest):
os.remove(dest)
else:
shutil.rmtree(dest)
try:
# If the current entry is a file, copy that alone.
if os.path.isfile(orig):
shutil.copy(orig, dest)
# If it's a directory, copy it recursively.
else:
shutil.copytree(orig, dest)
        except Exception as e:
            print("Failed to copy {0}: {1}".format(orig, e))
            continue
print(green("done"))
def generate_pages(destination):
"""Generate static pages.
"""
# Check if the pages folder exists.
if not os.path.exists("pages"):
print("Unable to find pages folder..." + red("abort"))
return
# Load template engine.
env = Environment()
# Load all files from the pages folder and from the template
# folder, to get the skeleton and the content.
env.loader = FileSystemLoader(["pages", "template"])
# Loop through all static pages.
for page in os.listdir("pages"):
dest = os.path.join(destination, page)
print("Generating page {0}...".format(dest)),
# Load the template for the current page.
template = env.get_template(page)
# Generate the HTML output for the static page.
html = template.render({"page" : page})
# Write the HTML to the destination file.
with open(dest, "w") as handle:
handle.write(html)
print(green("done"))
def generate_posts(destination):
"""Generate dynamic blog posts.
"""
# Check if the posts folder exists.
if not os.path.exists("posts"):
print("Unable to find posts folder..." + red("abort"))
return
posts = []
# Load template engine.
env = Environment()
# Load the template files, base and post.
env.loader = FileSystemLoader("template")
for post in os.listdir("posts"):
orig = os.path.join("posts", post)
if os.path.isdir(orig):
print("Entry \"{0}\" is a directory, {1}".format(post, yellow("skip")))
continue
print("Processing \"{0}\"...".format(orig)),
# Read the raw content of the blog post.
raw = open(orig, "r").read()
# Split headers and content, they're separated by the first
# empty line.
headers, content = raw.split("\n\n", 1)
# Load YAML headers.
        headers = yaml.safe_load(headers)
# Initialize Markdown processor.
md = Markdown()
# Add the source code pre-processor.
md.preprocessors.add("sourcecode", CodeBlockPreprocessor(), "_begin")
# Generate the HTML conversion of the original Markdown content.
content = md.convert(content)
print(green("done"))
# If the user specified a date use it, otherwise generate it.
if headers.has_key("Date"):
date = headers["Date"]
else:
date = time.strftime("%Y-%m-%d %H:%M:%S")
# Generate post descriptor.
post_object = dict(
date=date,
title=headers["Title"],
slug=headers["Slug"],
author=headers["Author"],
content=content,
link=None
)
# This is where we're going to generate the final HTML blog post.
file_name = "{0}-{1}.html".format(str(post_object["date"])[:10], post_object["slug"])
dest = os.path.join(destination, file_name)
        # If the blog post doesn't exist yet, generate it.
        if not os.path.exists(dest):
            print("Generating HTML blog post at \"{0}\"...".format(dest), end=" ")
# Load basic blog post template.
template = env.get_template("post.html")
# Generate the HTML content.
html = template.render(**post_object)
# Create the HTML file.
with open(dest, "w") as handle:
handle.write(html)
print(green("done"))
else:
print("Post already exists, delete manually if needed... " + yellow("skip"))
# Add the new file name to the post object.
post_object["link"] = file_name
# Add the generated post to the overall list.
posts.append(post_object)
# Order blog posts from recent to older.
posts.sort(key=lambda key: key["date"])
posts.reverse()
return posts
def generate_index(posts, destination):
"""This generate the index page with the list of blog posts.
"""
dest = os.path.join(destination, "index.html")
print("Generating blog index..."),
# Load template engine.
env = Environment()
# Load the template files, base and post.
env.loader = FileSystemLoader("template")
# Load template file.
tpl = env.get_template("index.html")
# Generate HTML content.
first = posts.pop(0)
html = tpl.render({"page" : "index", "first" : first, "posts" : posts})
# Create file.
with open(dest, "w") as handle:
handle.write(html)
print(green("done"))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--static", help="Install static files", action="store_true", default=False)
parser.add_argument("-p", "--pages", help="Generate static pages", action="store_true", default=False)
parser.add_argument("-d", "--destination", help="Specify the destination folder where to install the files", required=True)
args = parser.parse_args()
    if not os.path.exists(args.destination):
        print("The destination folder does not exist, create it first... " + red("abort"))
        return
# If requested to do so, generate static pages.
if args.pages:
generate_pages(args.destination)
# If requested to do so, install static files.
if args.static:
generate_static(args.destination)
    # Generate dynamic blog posts.
    posts = generate_posts(args.destination)
    if posts:
        generate_index(posts, args.destination)
if __name__ == "__main__":
    main() | habu.py | 0.4917 | 0.144059
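The post layout generate_posts() expects is worth spelling out: YAML headers, one blank line, then Markdown, with optional [code:lang]...[/code] blocks handled by CodeBlockPreprocessor. "Date" is optional and defaults to the generation time. A minimal example:

EXAMPLE_POST = """\
Title: Hello, world
Slug: hello-world
Author: jane

Some *Markdown* content.

[code:python]
print("this block gets syntax-highlighted")
[/code]
"""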
import json
from typing import List
from decaychain.MeasurementUnit import Concentration
from decaychain.MeasurementUnit import Time
from decaychain.Nuclide import Nuclide
from decaychain.DecayChain import Generator
class Request:
def __init__(self, request_dict):
self.measurements = request_dict.get('measurements')
self.target_time_value = request_dict.get('target_time_value')
self.target_time_unit = request_dict.get('target_time_unit')
class Result:
def __init__(self, nuclide: Nuclide, concentration: Concentration, time: Time):
self.nuclide = nuclide
self.concentration = concentration
self.time = time
def __json__(self, request):
return {
'nuclide': self.nuclide,
'calculation': {
'concentration': self.concentration,
'time': self.time
}
}
class Calculator:
def __init__(self):
self.chain_generator = Generator()
    def calculate(self, decay_calculation_request: Request) -> List[Result]:
result = []
target_time = Time(
value=decay_calculation_request.target_time_value,
unit=decay_calculation_request.target_time_unit
)
for measurement in decay_calculation_request.measurements:
nuclide_name = measurement.get('nuclide_name')
chains = self.chain_generator.get_for_nuclide_name(nuclide_name)
chains_names_only = [
chain.get_nuclide_names()
for chain in chains
]
            print(json.dumps(chains_names_only, indent=4))
            # NOTE: the original code used `nuclide` below without defining it;
            # constructing it from the measured name is an assumption here
            # (Nuclide's constructor signature is not shown in this module).
            nuclide = Nuclide(nuclide_name)
            initial_concentration = Concentration(
                value=measurement.get('concentration_value'),
                unit=measurement.get('concentration_unit')
            )
            calculated_concentration = nuclide.calculate_concentration_at_time(initial_concentration, target_time)
result.append(
Result(
nuclide=nuclide,
concentration=calculated_concentration,
time=target_time
)
)
        return result | decaychain/DecayCalculation.py | 0.774754 | 0.250867
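A hypothetical usage sketch for the calculator above. The nuclide name and unit strings are assumptions; the values actually accepted depend on Nuclide and MeasurementUnit in this package.

if __name__ == '__main__':
    request = Request({
        'measurements': [
            {'nuclide_name': 'Cs-137',        # assumed naming convention
             'concentration_value': 10.0,
             'concentration_unit': 'Bq/g'},   # assumed unit string
        ],
        'target_time_value': 30,
        'target_time_unit': 'years',
    })
    for res in Calculator().calculate(request):
        print(res.nuclide, res.concentration, res.time)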
from jinjabread.functions.salt import saltyaml
import yaml
from jinja2 import Template
import re
import traceback
def lookup(d, key):
    ''' recursive dictionary lookup until a value for the key is found '''
    stack = list(d.items())
    while stack:
        k, v = stack.pop()
        if isinstance(v, dict):
            stack.extend(v.items())
        elif k == key:
            return v
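# Example: lookup({'a': {'b': 1}, 'c': 2}, 'b') returns 1. The traversal pops
# from a flat stack, so when the same key exists at several depths the winner
# is arbitrary -- hence the uniqueness caveat noted in pillarget() below.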
def pre_render_test(state):
pre_state_format = []
    # avoid pre-render errors by commenting out jinja code, while still retaining code placement for debug messages.
for line in state.splitlines():
if "{%" in line or "{#" in line or "#}" in line:
line = "#"+line
if "{{" in line and "}}" in line:
line = line.replace("{{", "'{{").replace("}}", "}}'")
pre_state_format.append(line)
#print line
pre_state = '\n'.join(pre_state_format)
    saltyaml.render(pre_state)  # validation only: raises if the masked state is not valid YAML
    return pre_state
def post_render_test(rendered_template):
filtered_template = "\n".join([ll.rstrip() for ll in rendered_template.splitlines() if ll.strip()])
prettify = []
for line in filtered_template.splitlines():
if not line.startswith(' '):
line = "\n"+line
prettify.append(line)
filtered_template = '\n'.join(prettify)
test_render = saltyaml.render(rendered_template)
return [filtered_template, test_render]
def mash(grains, pillar, state):
    ''' Takes YAML data (grains, pillar) and a Jinja state template, and renders the state as YAML '''
try:
yamlpillar = yaml.safe_load(pillar)
yamlgrain = yaml.safe_load(grains)
def pillarget(item):
if ":" in item:
item = item.split(':')
                return lookup(yamlpillar, item[-1])  # this is bad: nesting is ignored, so every key must be unique to avoid errors
return yamlpillar[item]
def grainsget(item):
if ":" in item:
item = item.split(':')
value = lookup(yamlgrain, item[-1])
return value
return yamlgrain[item]
salt = {'pillar.get':pillarget, 'grains.get':grainsget}
# check yaml syntax before rendering
pre_render_test(state)
# render state
template = Template(state)
rendered_template = template.render({'pillar':yamlpillar, 'grains':yamlgrain, 'salt':salt})
# check yaml syntax after rendering
completed_render = post_render_test(rendered_template)
return completed_render
except Exception as e:
return [str(e).replace("could not found","could not find"), "fail"]
dummydata = {"pillar":["""\
secret: 1A2B3C4D5E6F
ns:
- 172.16.17.32
- 172.16.17.32
"""],
"grains":["""\
host: minion
id: minion.domain.net
domain: domain.net
fqdn: minion.domain.net
fqdn_ip4:
- 192.168.1.3
saltversion: 2016.3.3
os: Debian
os_family: Debian
osarch: amd64
oscodename: jessie
osfinger: Debian-8
osfullname: Debian
osmajorrelease: '8'
osrelease: '8.2'
biosreleasedate: 01/01/2011
biosversion: Bochs
cpuarch: x86_64
ip4_nameservers:
- 8.8.8.8
- 8.8.4.4
ip4_interfaces:
eth0:
- 192.168.1.3
kernel: Linux
localhost: minion.domain.net
mem_total: 2010
num_cpus: 2
"""],
"state":["""\
{% set secret = pillar['secret'] %}
{% set domain = grains['domain'] %}
{% set osfinger = salt['grains.get']('osfinger') %}
{% set nameservers = salt['pillar.get']('ns') %}
{% if domain == 'domain.net' %}
set secret:
file.managed:
- name: /etc/secret
- source: salt://state/secret
- template: jinja
- context:
secret: {{ secret }}
{% endif %}
asd
eth0:
network.managed:
- dns:
{% for ip in nameservers %}
- {{ ip }}
{% endfor %}
"""]}
#print mash(dummydata['grains'][0], dummydata['pillar'][0], dummydata['state'][0])[0] | jinjabread/functions/render_state.py | 0.257392 | 0.217566
import re
import sys
import pytest
from num2words import num2words
def strip_non_stress(pronunciation):
return [c for c in pronunciation if c in "012"]
cached_stresses = None
def load_stresses():
global cached_stresses # hack for fast tests
if cached_stresses is not None:
return cached_stresses
result = {}
for line in open("cmudict-0.7b", encoding="latin1"):
if line.startswith(";;;"):
continue
        # dictionary entries are "WORD  PH0 PH1 ...": split off the word only
        word, stresses = line.split(" ", 1)
stresses = strip_non_stress(stresses)
result[word] = stresses
cached_stresses = result
return result
def sound_out(*, stresses, token):
if token[0] == "'" and token[-1] == "'":
token = token[1:-1]
if token in stresses or token.isdigit():
return [token]
else:
if len(token) >= 2:
print("sounding out", token, file=sys.stderr)
return [c for c in token]
def split_token(*, stresses, token):
result = token.split("_")
result = [x.upper() for x in result]
result = [x for x in result if len(x) >= 1]
result = [part for token in result for part in sound_out(token=token, stresses=stresses)]
return result
def test_split_token():
stresses = load_stresses()
assert split_token(token="foo_bar", stresses=stresses) == ["FOO", "BAR"]
assert split_token(token="FOO_BAR", stresses=stresses) == ["FOO", "BAR"]
assert split_token(token="QXPAZ", stresses=stresses) == ['Q', 'X', 'P', 'A', 'Z']
assert split_token(token="11", stresses=stresses) == ["11"]
def basic_split(line):
    return re.findall(r"""[A-Za-z_0-9']+|[^\s]| """, line)
def tokenize(*, stresses, line):
result = basic_split(line)
result = [part for token in result for part in split_token(token=token, stresses=stresses)]
return result
def test_tokenize():
stresses = load_stresses()
assert tokenize(line="int main_fun() {", stresses=stresses) == ["INT", " ", "MAIN", "FUN", '(', ')', ' ', '{']
def syllables_of_token(*, token, stresses):
if token == "=":
token = "EQUALS"
if token == "&":
token = "AMPERSAND"
if token == ".":
token = "DOT"
if token == ":":
token = "COLON"
if token == "-":
token = "MINUS"
if token == "+":
token = "PLUS"
if token == '%':
token = "PERCENT"
if token.isdigit():
elts = num2words(int(token)).upper().replace("-", " ").split()
return [s for elt in elts for s in syllables_of_token(token=elt, stresses=stresses)]
if len(token) == 1 and not token.isalpha():
return []
if token in stresses:
return [(s, token) for s in stresses[token]]
raise Exception("unrecognized token: " + token)
def is_valid_iamb(syllables):
if syllables[-1][0] == '0': # this means unstressed in CMU dictionary
return False
syllables_ok = len(syllables) == 2
if len(syllables) == 3 and syllables[-2][1] == "THE":
syllables_ok = True
if len(syllables) == 3 and syllables[0] == syllables[1] and syllables[0][0] == '0':
syllables_ok = True
return syllables_ok
def main(input_lines):
stresses = load_stresses()
for index, line in enumerate(input_lines):
iambs = []
def err(msg):
msg = "on line {}: {}\niambs: {}\nthe line is: {}".format(index+1, msg, iambs, line)
raise Exception(msg)
tokens = tokenize(line=line, stresses=stresses)
syllables = [s for token in tokens for s in syllables_of_token(stresses=stresses, token=token)]
        while syllables:
            ok = False
            # use a distinct name here: reusing `index` would shadow the line
            # counter that err() reports
            for cut in range(len(syllables), 0, -1):
                prefix = syllables[:cut]
                suffix = syllables[cut:]
if is_valid_iamb(prefix):
iambs.append(prefix)
syllables = suffix
ok = True
break
if not ok:
base_tokens = [s[1] for s in syllables]
tokens = []
for t in base_tokens:
if tokens == [] or tokens[-1] != t:
tokens.append(t)
err("no prefix of {} is an iamb".format(syllables))
expected_iambs = 5
if len(iambs) != expected_iambs and len(iambs) != 0:
err("expected {} iambs, found {}".format(expected_iambs, len(iambs)))
def test_main():
main([])
main(["{}"])
main(["But, soft! what light through yonder window breaks?"])
with pytest.raises(Exception):
main(["But, soft! what light through yonder window breaks, bro?"])
with pytest.raises(Exception):
main(open("test2.cpp"))
# main(open("test1.cpp"))
if __name__ == "__main__":
    main(sys.stdin) | iamb.py | 0.235812 | 0.363393
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from os import system, name
import sys
import mysql.connector
from statistics import mean
def conexion_sql():
conexion=mysql.connector.connect(host="localhost",
user="-",
passwd="-",
database="-")
return conexion
def clear():
    if name == 'nt':
        system('cls')
    else:
        system('clear')
def opcion0(cursor,conexion):
"""
Opción para tomar las mediciones del día. Se preguntan apellidos, nombre, edad, sexo
y una serie de datos cualitativos que pueden ser indicadores del COVID-19. Se busca a la persona en
la base de datos y se crea un nuevo registro de sus mediciones con la fecha actual. Después de
seleccionar esta opción la persona debe utilizar la estación de toma de datos
biométricos para mandar los datos cuantitativos.
"""
try:
clear()
print("\nMIS MEDICIONES DE HOY")
print("\nIngrese sus datos:")
apellidos = input("Ingresa tus apellidos: ")
nombres = input("Ingresa tu nombre(s): ")
edad = input("Ingresa tu edad: ")
sexo = input("Ingresa tu sexo [M para masculino, F para femenino, O para otro]: ")
CP = input("Ingrese el código postal del lugar donde se encuentra: ")
print("\nEn la última semana ...")
congestion_nasal = input("¿Ha tenido congestion nasal? Ingrese si/no: ")
dolor_cabeza = input("¿Ha tenido dolor de cabeza? Ingrese si/no: ")
tos_seca = input("¿Ha tenido tos seca? Ingrese si/no: ")
cansancio = input("¿Ha sentido cansancio fuera de lo normal? Ingrese si/no: ")
if(sexo.lower() in ['m','hombre']):
sexo = 'masculino'
elif(sexo.lower() in ['f','mujer']):
sexo = 'femenino'
elif(sexo.lower() in ['o','otro','0']):
sexo = 'otro'
query = f'SELECT ID_usuario FROM persona WHERE nombres = "{nombres}" AND apellidos = "{apellidos}" AND sexo = "{sexo}" AND TIMESTAMPDIFF(YEAR, persona.fecha_nacimiento,CURDATE()) = {edad}'
cursor.execute(query)
bd = pd.DataFrame(columns = ["ID_usuario"])
for fila in cursor:
bd = bd.append({"ID_usuario":fila[0]}, ignore_index=True)
ID_usuario = bd.iat[0,0]
print(ID_usuario)
conexion.close()
conexion2 = conexion_sql()
cursor2 = conexion2.cursor()
query2 = f'INSERT INTO datos_biometricos (ID_usuario,codigo_postal,congestion_nasal,dolor_cabeza, tos_seca,cansancio) VALUES ({ID_usuario},"{CP}","{congestion_nasal}","{dolor_cabeza}","{tos_seca}","{cansancio}")'
cursor2.execute(query2)
conexion2.commit()
print("\nMUCHAS GRACIAS, POR FAVOR PROCEDA A TOMAR SUS MEDIDAS BIOMÉTRICAS\n")
    except Exception:
        print("\nERROR: No se pudo realizar la accion, por favor revise que hayan introducido los datos correctos.\n")
def opcion1(cursor, conexion):
"""
Se realiza un promedio semanal de los indicadores cuantitativos y cualitativos para un cierto usuario.
En caso de que los datos impliquen una posible infección por COVID-19, se emite una alerta.
"""
clear()
print("\nMI DIAGNÓSTICO")
    # weekly averages over roughly the last two weeks
nombre = input("Ingresa tu nombre: ")
apellido = input("Ingresa tus apellidos: ")
query = f'SELECT week(db.fecha) as "Semana", p.ID_usuario as "ID Usuario", p.nombres as "Nombre",p.apellidos as "Apellido", AVG(db.saturacion_oxigeno) as "Saturacion Oxigeno", AVG(db.ritmo_cardiaco) as "Ritmo cardiaco", AVG(db.temperatura) as "Temperatura", COUNT( CASE WHEN db.congestion_nasal = "si" THEN 1 END ) AS "Veces con congestion nasal", COUNT( CASE WHEN db.dolor_cabeza = "si" THEN 1 END ) AS "Veces con dolor de cabeza", COUNT( CASE WHEN db.tos_seca = "si" THEN 1 END ) AS "Veces con tos seca", COUNT( CASE WHEN db.cansancio = "si" THEN 1 END ) AS "Veces con cansancio" FROM datos_biometricos db, persona p WHERE db.ID_usuario = p.ID_usuario AND DATEDIFF(NOW(),db.fecha) < 15 AND p.nombres LIKE "{nombre}" AND p.apellidos LIKE "{apellido}" GROUP by week(db.fecha), p.ID_usuario'
cursor.execute(query)
bd = pd.DataFrame(columns = ["Semana", "ID_Usuario", "Nombre", "Apellido", "Promedio Saturacion Oxigeno", "Promedio Ritmo Cardiaco", "Promedio Temperatura", "Veces con congestion nasal", "Veces con dolor de cabeza", "Veces con tos seca", "Veces con cansancio"])
for fila in cursor:
bd = bd.append({"Semana": fila[0], "ID_Usuario": fila[1], "Nombre": fila[2], "Apellido": fila[3], "Promedio Saturacion Oxigeno": fila[4], "Promedio Ritmo Cardiaco": fila[5], "Promedio Temperatura": fila[6], "Veces con congestion nasal": fila[7], "Veces con dolor de cabeza": fila[8], "Veces con tos seca": fila[9], "Veces con cansancio": fila[10]}, ignore_index=True)
conexion.close()
print(bd)
semanas = ["Semana 1","Semana 2"]
promOx=bd["Promedio Saturacion Oxigeno"].tolist()
promRpm = bd["Promedio Ritmo Cardiaco"].tolist()
promTemp = bd["Promedio Temperatura"].tolist()
congestion = bd["Veces con congestion nasal"].tolist()
cabeza = bd["Veces con dolor de cabeza"].tolist()
cansancio = bd["Veces con cansancio"].tolist()
tosseca = bd["Veces con tos seca"].tolist()
x = np.arange(len(semanas))
width = 0.35
fig, ax = plt.subplots()
rects2 = ax.bar(x -0.5 + width/3 , promRpm, width, label='RPM [bpm]')
rects1 = ax.bar(x - width/3, promOx, width, label='Oxigenacion [%SpO2]')
rects3 = ax.bar(x + width/3, promTemp, width, label='Temperatura [C°]')
condTemp = mean(promTemp) > 37 or mean(promTemp) < 36
condOx = mean(promOx) < 95
condRpm = mean(promRpm) < 60
sintomas = sum(congestion) > 4 or sum(cabeza) > 4 or sum(cansancio) > 4 or sum(tosseca) > 4
    # if pulse and temperature are alarming, show the message "Acuda al médico" (see a doctor)
    if (condTemp and condOx and sintomas):
        print("\nALERTA 1: Sus datos de las últimas dos semanas indican que usted\npuede haber contraído el virus COVID-19. POR FAVOR CONSULTE A SU\nMÉDICO A LA BREVEDAD Y ABSTÉNGASE DEL CONTACTO CON OTRAS PERSONAS.\n")
        # A low average pulse on top of fever is an additional risk indicator.
        if (mean(promTemp) > 37 and condRpm):
            print("\nALERTA 2: su pulso promedio es muy bajo, ello suele ser\nun indicador de riesgo en pacientes con COVID-19, ACUDA A UN HOSPITAL PARA SU REVISIÓN.")
    else:
        print("\nSus datos indican que usted no presenta ningún síntoma\nnotorio de COVID-19. Sin embargo, recuerde seguir tomando sus precauciones.\n")
ax.set_ylabel("Promedios")
ax.set_xticks(x)
ax.set_title("Estadísticas de 2 semanas")
ax.set_xticklabels(semanas)
ax.legend(loc=8, prop={'size': 6})
rectas = [rects1, rects2, rects3]
    for rects in rectas:
        for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(round(height,2)),
xy=(rect.get_x() + rect.get_width() / 2, height-2),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
plt.show()
    # Oxygen saturation 95-100 NORMAL, below 95 WARNING
    # Temperature 36.5°C to 37°C NORMAL, anything else WARNING
    # Heart rate 60 to 100 NORMAL
input("Ingrese cualquier letra para regresar: ")
def opcion2(cursor, conexion):
"""
Esta vista genera una gráfica de barras de todos los códigos postales con al menos un posible
caso de COVID-19.
"""
clear()
print("\nZONAS CON MÁS SOSPECHOSOS")
cursor.execute("SELECT datos_biometricos.codigo_postal as 'Zona', COUNT(DISTINCT persona.ID_usuario) as 'Número de Posibles Infectados' FROM persona, datos_biometricos WHERE datos_biometricos.ID_usuario = persona.ID_usuario AND DATEDIFF(NOW(),datos_biometricos.fecha) < 8 AND (datos_biometricos.temperatura > 37 OR datos_biometricos.temperatura < 37) AND datos_biometricos.saturacion_oxigeno < 95 AND ((datos_biometricos.congestion_nasal = 'si') OR (datos_biometricos.dolor_cabeza = 'si') OR (datos_biometricos.tos_seca = 'si') OR (datos_biometricos.cansancio = 'si')) GROUP BY datos_biometricos.codigo_postal ")
bd= pd.DataFrame(columns = ["Zona","Numero de Posibles Infectados"])
for fila in cursor:
        bd = bd.append({ 'Zona': fila[0], 'Numero de Posibles Infectados':fila[1]},
ignore_index=True)
conexion.close()
bd.style
    bd.plot(kind = 'bar', x = 'Zona', y = 'Numero de Posibles Infectados')
plt.show()
input("Ingrese cualquier letra para regresar: ")
def opcion3(cursor, conexion):
"""
Esta vista se encarga de proporcionar los nombres completos de las personas que tengan
alguna condición que los categorice como de alto riesgo. Se muestran los nombres en una
tabla o data frame.
"""
clear()
print("\nPERSONAS REGISTRADAS DE ALTO RIESGO")
cursor.execute('select p.ID_usuario, p.nombres as "Nombre", p.apellidos as "Apellidos" from persona p, persona_condicion pc, condicion_de_salud cs where (p.ID_usuario = pc.ID_usuario AND pc.ID_condicion = cs.ID_condicion AND cs.riesgo_covid = "alto") OR TIMESTAMPDIFF(YEAR, p.fecha_nacimiento, CURDATE()) > 59 GROUP BY p.ID_usuario, p.nombres, p.apellidos')
bd = pd.DataFrame(columns = ["ID_Usuario","Nombre", "Apellido"])
for fila in cursor:
bd = bd.append({"ID_Usuario" : fila[0], "Nombre": fila[1], "Apellido": fila[2]}, ignore_index=True)
conexion.close()
print(bd)
input("Ingrese cualquier letra para regresar: ")
def opcion4(cursor, conexion):
"""
Esta vista proporciona una tabla o data frame que muestra todos los códigos postales en
donde se registraron los datos biométricos de una persona.
"""
clear()
print("\nRASTREO")
apellido = input("Ingrese los apellidos: ")
nombre = input("Ingrese el nombre: ")
query = f'SELECT db.codigo_postal as "Código Postal", p.nombres as "Nombre", p.apellidos as "Apellido" from datos_biometricos db, persona p where db.ID_usuario = p.ID_usuario and p.nombres like "{nombre}" and p.apellidos like "{apellido}" GROUP BY db.codigo_postal, p.nombres, p.apellidos'
cursor.execute(query)
bd = pd.DataFrame(columns = ["Código Postal","Nombre","Apellido"])
for fila in cursor:
bd = bd.append({"Código Postal": fila[0], "Nombre": fila[1], "Apellido": fila[2]}, ignore_index=True)
conexion.close()
print(bd)
input("Ingrese cualquier letra para regresar: ")
def opcion5(cursor, conexion):
"""
Esta vista permite que el usuario introduzca su código postal y pueda saber el número de
posibles casos de COVID-19 cuyos registros se realizaron en dicha zona.
"""
clear()
print("\nPOSIBLES CASOS EN MI ZONA")
codigo_postal = input("Ingresa tu código postal: ")
cursor.execute(f"SELECT datos_biometricos.codigo_postal as 'Zona', COUNT(DISTINCT persona.ID_usuario) as 'Número de Posibles Infectados' FROM persona, datos_biometricos WHERE datos_biometricos.ID_usuario = persona.ID_usuario AND datos_biometricos.codigo_postal = '{codigo_postal}' AND DATEDIFF(NOW(),datos_biometricos.fecha) < 8 AND (datos_biometricos.temperatura > 37 OR datos_biometricos.temperatura < 37) AND datos_biometricos.saturacion_oxigeno < 95 AND ((datos_biometricos.congestion_nasal = 'si') OR (datos_biometricos.dolor_cabeza = 'si') OR (datos_biometricos.tos_seca = 'si') OR (datos_biometricos.cansancio = 'si')) GROUP BY datos_biometricos.codigo_postal ")
bd= pd.DataFrame(columns = ["Zona","Numero de Posibles Infectados"])
for fila in cursor:
bd = bd.append({ 'Zona': fila[0], 'Numero de Posibles Infectados':fila[1]},
ignore_index=True)
conexion.close()
bd.style
print(bd)
input("Ingrese cualquier letra para regresar: ")
def opcion6(cursor, conexion):
"""
Esta vista regresa una tabla con los ID, nombres, apellidos, los promedios de temperatura y
los promedios de oxigenación de las personas que han presentado indicios de haber contraído
el virus en los últimos 7 días.
"""
clear()
print("\nPOSIBLES CASOS DE COVID-19")
cursor.execute("SELECT persona.ID_usuario, persona.nombres, persona.apellidos, ROUND(AVG(datos_biometricos.temperatura),2), ROUND(AVG(datos_biometricos.saturacion_oxigeno),2) FROM persona, datos_biometricos WHERE datos_biometricos.ID_usuario = persona.ID_usuario AND DATEDIFF(NOW(),datos_biometricos.fecha) < 8 GROUP BY datos_biometricos.ID_usuario HAVING (AVG(datos_biometricos.temperatura) > 37 OR AVG(datos_biometricos.temperatura) < 36) AND AVG(datos_biometricos.saturacion_oxigeno) < 95 AND (COUNT(datos_biometricos.congestion_nasal = 'si') > 4 OR COUNT(datos_biometricos.dolor_cabeza = 'si') > 4 OR COUNT(datos_biometricos.tos_seca = 'si') > 4 OR COUNT(datos_biometricos.cansancio = 'si') > 4)")
bd= pd.DataFrame(columns = ["ID Usuario","Nombre(s)","Apellido(s)","Promedio Temperatura [°C]","Promedio Saturación Oxígeno [%SpO2]"])
for fila in cursor:
bd = bd.append({ 'ID Usuario': fila[0], 'Nombre(s)':fila[1], 'Apellido(s)':fila[2],'Promedio Temperatura [°C]':fila[3], 'Promedio Saturación Oxígeno [%SpO2]':fila[4]},
ignore_index=True)
conexion.close()
bd.style
print(bd)
input("Ingrese cualquier letra para regresar: ")
def main():
while True:
clear()
conexion = conexion_sql()
cursor = conexion.cursor()
print("Reto de IoT\n\n0)Mis mediciones de hoy\n1)Mi diagnóstico\n2)Zonas con más sospechosos\n3)Personas registradas de alto riesgo\n4)Rastreo\n5)Posibles casos en mi zona\n6)Posibles casos de COVID-19\n7)SALIR")
opcion = int(input("Seleccione una opción: "))
if (opcion == 0):
opcion0(cursor,conexion)
elif(opcion == 1):
opcion1(cursor, conexion)
elif(opcion == 2):
opcion2(cursor, conexion)
elif(opcion == 3):
opcion3(cursor, conexion)
elif(opcion == 4):
opcion4(cursor, conexion)
elif(opcion == 5):
opcion5(cursor, conexion)
elif(opcion == 6):
opcion6(cursor, conexion)
elif(opcion == 7):
conexion.close()
sys.exit()
else:
print("No ingresó una opción correcta.")
main() | Resaltador Paralelo/Carpeta1/bio.py | 0.212395 | 0.308171
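# Aside: the queries in bio.py splice raw input() strings straight into SQL via
# f-strings, which is open to SQL injection. A minimal sketch of the opcion0
# lookup rewritten with mysql.connector's parameter binding; the helper name
# find_user_id is hypothetical, the table and columns are the ones used above.
def find_user_id(cursor, nombres, apellidos, sexo, edad):
    query = ("SELECT ID_usuario FROM persona "
             "WHERE nombres = %s AND apellidos = %s AND sexo = %s "
             "AND TIMESTAMPDIFF(YEAR, fecha_nacimiento, CURDATE()) = %s")
    # mysql.connector escapes each bound value, so user input cannot
    # terminate the statement or alter its structure.
    cursor.execute(query, (nombres, apellidos, sexo, edad))
    row = cursor.fetchone()
    return row[0] if row else None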
with open('Data.txt') as f:
for line in f:
string = line
def is_int(n):
try:
int(n)
return True
except ValueError:
return False
def sum_string(input_string):
    total = 0
    for separator in ':[]{}':
        input_string = input_string.replace(separator, ',')
    for component in input_string.split(','):
        if is_int(component):
            total += int(component)
    return total
def has_red(string_object):
left_brace_count = 0
left_square_count = 0
for i in range(len(string_object) - 2):
if string_object[i] == '{':
left_brace_count += 1
elif string_object[i] == '}':
left_brace_count -= 1
elif string_object[i] == '[':
left_square_count += 1
elif string_object[i] == ']':
left_square_count -= 1
if (string_object[i:i + 3] == 'red') and (left_brace_count == 1) and (left_square_count == 0):
return True
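    # 'else' here pairs with the for-loop (for/else): it runs only when the scan
    # completes without returning True, i.e. no top-level 'red' was found.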
else:
return False
def has_objects(string_object):
if '{' in string_object[1:]:
return True
else:
return False
def matching_right_brace(index_left, entire_string):
left_brace_count = 0
for i in range(index_left, len(entire_string)):
if entire_string[i] == '{':
left_brace_count += 1
elif entire_string[i] == '}':
left_brace_count -= 1
if left_brace_count == 0:
return(i)
total = 0
index = 0
while '{' in string:
if string[index] == '{':
        if not has_red(string[index:matching_right_brace(index, string) + 1]):
            if not has_objects(string[index:matching_right_brace(index, string) + 1]):
total += sum_string(string[index:matching_right_brace(index, string) + 1])
string = string[:index] + string[matching_right_brace(index, string) + 1:]
else:
index = string.index('{', index + 1)
else:
string = string[:index] + string[matching_right_brace(index, string) + 1:]
elif index == len(string) - 1:
index = 0
else:
index += 1
total += sum_string(string)
print(total) | Days/Day 12 - JSAbacusFramework.io/Part 2.py | 0.110411 | 0.329419
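# Aside: since the input is JSON (Advent of Code 2015, day 12), the same part-2
# answer can be computed by parsing the document and recursing, skipping any
# object whose values include "red" - a sketch, assuming Data.txt holds the
# puzzle input as a single JSON document.
import json

def total_without_red(node):
    # Sum every number in the structure, ignoring any dict that has "red"
    # among its values (arrays containing "red" are still counted).
    if isinstance(node, list):
        return sum(total_without_red(item) for item in node)
    if isinstance(node, dict):
        if 'red' in node.values():
            return 0
        return sum(total_without_red(value) for value in node.values())
    return node if isinstance(node, int) else 0

with open('Data.txt') as f:
    print(total_without_red(json.load(f)))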
from __future__ import (print_function, division, absolute_import,
unicode_literals)
from collections import OrderedDict, namedtuple
import logging
import os
import xml.etree.ElementTree as etree
from glyphsLib.builder.builders import UFOBuilder
from glyphsLib.builder.custom_params import to_ufo_custom_params
from glyphsLib.builder.names import build_stylemap_names
from glyphsLib.builder.constants import GLYPHS_PREFIX
from glyphsLib.builder.instances import apply_instance_data, InstanceData
from glyphsLib.util import build_ufo_path, write_ufo, clean_ufo
__all__ = [
'interpolate', 'build_designspace', 'apply_instance_data'
]
logger = logging.getLogger(__name__)
# DEPRECATED
def interpolate(ufos, master_dir, out_dir, instance_data, round_geometry=True):
"""Create MutatorMath designspace and generate instances.
Returns instance UFOs.
"""
# Problem with this function: should take a designspace explicitly.
from mutatorMath.ufo import build
designspace_path, instance_files = build_designspace(
ufos, master_dir, out_dir, instance_data)
logger.info('Building instances')
for path, _ in instance_files:
clean_ufo(path)
build(designspace_path, outputUFOFormatVersion=3,
roundGeometry=round_geometry)
instance_ufos = apply_instance_data(instance_files)
return instance_ufos
# DEPRECATED
def build_designspace(masters, master_dir, out_dir, instance_data):
"""Just create MutatorMath designspace without generating instances.
Returns the path of the resulting designspace document and a list of
(instance_path, instance_data) tuples which map instance UFO filenames to
Glyphs data for that instance.
"""
# TODO: (jany) check whether this function is still useful
# No need to build a designspace, we should have it in "instance_data"
designspace = instance_data['designspace']
# Move masters and instances to the designated directories
for font in masters:
write_ufo(font, master_dir)
for source in designspace.sources:
if source.font is font:
source.path = font.path
for instance in designspace.instances:
instance.path = os.path.join(out_dir,
os.path.basename(instance.filename))
designspace_path = os.path.join(master_dir, designspace.filename)
designspace.write(designspace_path)
return designspace_path, InstanceData(designspace) | Lib/glyphsLib/interpolation.py | 0.61057 | 0.148201
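# Aside: a standalone sketch of the path rewriting that build_designspace()
# performs on instances, using fontTools' designspaceLib directly (assumed to
# be available, as glyphsLib builds on it); the file names are illustrative.
import os
from fontTools.designspaceLib import DesignSpaceDocument, InstanceDescriptor

doc = DesignSpaceDocument()
inst = InstanceDescriptor()
inst.filename = 'instance_ufo/MyFont-Regular.ufo'
doc.addInstance(inst)

out_dir = 'build/instances'
for instance in doc.instances:
    # Same join as in build_designspace(): keep the base name, move to out_dir.
    instance.path = os.path.join(out_dir, os.path.basename(instance.filename))
print(doc.instances[0].path)  # build/instances/MyFont-Regular.ufo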
import enum
import os, sys
current_path = os.path.dirname(os.path.realpath(__file__))
PROJECT_HOME = os.path.abspath(os.path.join(current_path, os.pardir))
if PROJECT_HOME not in sys.path:
sys.path.append(PROJECT_HOME)
class AgentMode(enum.Enum):
TRAIN = "TRAIN"
TEST = "TEST"
PLAY = "PLAY"
class OSName(enum.Enum):
MAC = "MAC"
WINDOWS = "WINDOWS"
LINUX = "LINUX"
REAL_RIP = "REAL_RIP_PLATFORM"
class EnvironmentName(enum.Enum):
ACROBOT_V1 = "Acrobot-v1"
CARTPOLE_V0 = "CartPole-v0"
CARTPOLE_V1 = "CartPole-v1"
MOUNTAINCAR_V0 = "MountainCar-v0"
MOUNTAINCARCONTINUOUS_V0 = "MountainCarContinuous-v0"
BLACKJACK_V0 = "Blackjack-v0"
QUANSER_SERVO_2 = "Quanser_Servo_2"
SYNCRONIZE_V0 = "Syncronize_V0"
ADJUST_ANGLE_V0 = "Adjust_angle_V0"
CHASER_V1_MAC = os.path.join(PROJECT_HOME, "rl_main", "environments", "unity", "unity_envs", "Chaser_v1")
CHASER_V1_WINDOWS = os.path.join(PROJECT_HOME, "rl_main", "environments", "unity", "unity_envs", "Chaser_v1.exe")
BREAKOUT_DETERMINISTIC_V4 = "BreakoutDeterministic-v4"
PENDULUM_V0 = 'Pendulum-v0'
LUNAR_LANDER_V2 = 'LunarLander-v2'
LUNAR_LANDER_CONTINUOUS_V2 = "LunarLanderContinuous-v2"
DRONE_RACING_MAC = os.path.join(PROJECT_HOME, "rl_main", "environments", "unity", "unity_envs", "DroneEnv_forMac")
DRONE_RACING_WINDOWS = os.path.join(PROJECT_HOME, "rl_main", "environments", "unity", "unity_envs", "Dron_Racing.exe")
GRIDWORLD_V0 = 'Gridworld-v0'
FROZENLAKE_V0 = 'FrozenLake-v0'
INVERTED_DOUBLE_PENDULUM_V2 = 'InvertedDoublePendulum-v2'
HOPPER_V2 = 'Hopper-v2'
PYBULLET_ANT_V0 = 'AntBulletEnv-v0'
PYBULLET_INVERTED_DOUBLE_PENDULUM_V0 = 'InvertedDoublePendulumBulletEnv-v0'
PYBULLET_HALF_CHEETAH_V0 = 'HalfCheetahBulletEnv-v0'
PYBULLET_HUMANOID_V0 = 'HumanoidBulletEnv-v0'
SWIMMER_V2 = 'Swimmer-v2'
REACHER_V2 = 'Reacher-v2'
HUMANOID_V2 = 'Humanoid-v2'
HUMANOID_STAND_UP_V2 = 'HumanoidStandup-v2'
INVERTED_PENDULUM_V2 = 'InvertedPendulum-v2'
WALKER_2D_V2 = 'Walker2d-v2'
PONG_NO_FRAME_SKIP_V4 = 'PongNoFrameskip-v4'
KUNGFU_MASTER_FRAME_SKIP_V4 = 'KungFuMasterNoFrameskip-v4'
BREAKOUT_NO_FRAME_SKIP_V4 = 'BreakoutNoFrameskip-v4'
SPACE_INVADERS_NO_FRAME_SKIP_V4 = "SpaceInvadersNoFrameskip-v4"
ENDURO_NO_FRAME_SKIP_V4 = "EnduroNoFrameskip-v4"
SEAQUEST_NO_FRAME_SKIP_V4 = "SeaquestNoFrameskip-v4"
FREEWAY_NO_FRAME_SKIP_V4 = "FreewayNoFrameskip-v4"
TSP_V0 = "TSP-v0" # bi-directional connections and uniform cost.
TSP_V1 = "TSP-v1" # bi-directional connections
KNAPSACK_V0 = "Knapsack-v0" # unbounded knapsack problem with 200 items.
KNAPSACK_V1 = "Knapsack-v1" # binary (0-1) knapsack problem with 200 items.
KNAPSACK_V2 = "Knapsack-v2" # bounded knapsack problem with 200 items.
KNAPSACK_V3 = "Knapsack-v3" # stochastic, online knapsack with 200 items.
PENDULUM_MATLAB_V0 = "Pendulum_Matlab_v0"
PENDULUM_MATLAB_DOUBLE_RIP_V0 = "Pendulum_Matlab_Double_RIP_v0"
REAL_DEVICE_RIP = "Real_Device_Rip"
REAL_DEVICE_DOUBLE_RIP = "Real_Device_Double_Rip"
TRADE_V0 = "Trade_v0"
SUPER_MARIO_BROS = "SuperMarioBros-v0"
TOY_V0 = "TOY_V0"
class DeepLearningModelName(enum.Enum):
DISCRETE_STOCHASTIC_ACTOR_CRITIC_MLP = "DISCRETE_STOCHASTIC_ACTOR_CRITIC_MLP"
CONTINUOUS_STOCHASTIC_ACTOR_CRITIC_MLP = "CONTINUOUS_STOCHASTIC_ACTOR_CRITIC_MLP"
DISCRETE_STOCHASTIC_ACTOR_CRITIC_CNN = "DISCRETE_STOCHASTIC_ACTOR_CRITIC_CNN"
CONTINUOUS_STOCHASTIC_ACTOR_CRITIC_CNN = "CONTINUOUS_STOCHASTIC_ACTOR_CRITIC_CNN"
DISCRETE_STOCHASTIC_ACTOR_CRITIC_RNN = "DISCRETE_STOCHASTIC_ACTOR_CRITIC_RNN"
CONTINUOUS_STOCHASTIC_ACTOR_CRITIC_RNN = "CONTINUOUS_STOCHASTIC_ACTOR_CRITIC_RNN"
DISCRETE_SAC_MLP = "DISCRETE_SAC_MLP"
CONTINUOUS_SAC_MLP = "CONTINUOUS_SAC_MLP"
DUELING_DQN_MLP = "DUELING_DQN_MLP"
DUELING_DQN_CNN = "DUELING_DQN_CNN"
DUELING_DQN_SMALL_CNN = "DUELING_DQN_SMALL_CNN"
RAINBOW_DQN_MLP = "RAINBOW_DQN_MLP"
CONTINUOUS_DETERMINISTIC_ACTOR_CRITIC_MLP = "CONTINUOUS_DETERMINISTIC_ACTOR_CRITIC_MLP"
CONTINUOUS_DETERMINISTIC_ACTOR_CRITIC_RNN = "CONTINUOUS_DETERMINISTIC_ACTOR_CRITIC_RNN"
TD3_MLP = "TD3_MLP"
SIMPLE_MLP = "SIMPLE_MLP"
SIMPLE_CNN = "SIMPLE_CNN"
SIMPLE_SMALL_CNN = "SIMPLE_SMALL_CNN"
class RLAlgorithmName(enum.Enum):
DQN_V0 = "DQN_V0"
RAINBOW_V0 = "RAINBOW_V0"
DDPG_V0 = "DDPG_V0"
D4PG_V0 = "D4PG_V0"
TD3_V0 = "TD3_V0"
DISCRETE_SAC_V0 = "DISCRETE_SAC_V0"
CONTINUOUS_SAC_V0 = "CONTINUOUS_SAC_V0"
DISCRETE_A2C_V0 = "DISCRETE_A2C_V0"
CONTINUOUS_A2C_V0 = "CONTINUOUS_A2C_V0"
POLICY_GRADIENT_V0 = "POLICY_GRADIENT_V0"
REINFORCE_V0 = "REINFORCE_V0"
DISCRETE_PPO_V0 = "DISCRETE_PPO_V0"
CONTINUOUS_PPO_V0 = "CONTINUOUS_PPO_V0"
EVOLUTION_STRATEGY = "EVOLUTION_STRATEGY"
GENETIC_ALGORITHM = "GENETIC_ALGORITHM"
MULTI_GENETIC_ALGORITHM = "MULTI_GENETIC_ALGORITHM"
class OptimizerName(enum.Enum):
NESTEROV = "nesterov"
ADAM = "Adam"
RMSProp = "RMSProp"
OFF_POLICY_RL_ALGORITHMS = [
RLAlgorithmName.DQN_V0,
RLAlgorithmName.DDPG_V0,
RLAlgorithmName.D4PG_V0,
RLAlgorithmName.RAINBOW_V0,
RLAlgorithmName.TD3_V0,
RLAlgorithmName.CONTINUOUS_SAC_V0,
RLAlgorithmName.DISCRETE_SAC_V0,
]
ON_POLICY_RL_ALGORITHMS = [
RLAlgorithmName.DISCRETE_A2C_V0,
RLAlgorithmName.CONTINUOUS_A2C_V0,
RLAlgorithmName.DISCRETE_PPO_V0,
RLAlgorithmName.CONTINUOUS_PPO_V0,
] | codes/e_utils/names.py | 0.252476 | 0.067762
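# Aside: a short sketch of how these registries are typically consumed - enum
# members can be recovered from their string values, and the algorithm lists
# support plain membership tests. The import path follows the repo layout above.
from codes.e_utils.names import (EnvironmentName, RLAlgorithmName,
                                 OFF_POLICY_RL_ALGORITHMS)

env = EnvironmentName('CartPole-v1')          # lookup by value
assert env is EnvironmentName.CARTPOLE_V1
algo = RLAlgorithmName.DQN_V0
print(algo.value, 'is off-policy:', algo in OFF_POLICY_RL_ALGORITHMS)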
import unittest
from google.appengine.ext import testbed
# Local imports
from speaker_lib import speaker, speakerdir
class TestSpeakerDir(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
def tearDown(self):
self.testbed.deactivate()
def test_retrieve_full_directory(self):
d = speakerdir.SpeakerDir()
self.assertEquals(0, len(d.get_speaker_list()))
s = speaker.make_new_speaker("mail@email")
s.put()
self.assertFalse(d.is_speaker_listed(s.key))
d.add_speaker(s.key)
self.assertTrue(d.is_speaker_listed(s.key))
speaker_list = d.get_speaker_list()
self.assertEquals(1, len(speaker_list))
self.assertEquals("mail@email", speaker_list[0].get().email)
s2 = speaker.make_new_speaker("harry@email")
s2.put()
d.add_speaker(s2.key)
s3 = speaker.make_new_speaker("ron@email")
s3.put()
d.add_speaker(s3.key)
self.assertEquals(3, len(d.get_speaker_list()))
def test_remove_speaker(self):
d = speakerdir.SpeakerDir()
self.assertEquals(0, len(d.get_speaker_list()))
s = speaker.make_new_speaker("mail@email")
s.put()
d.add_speaker(s.key)
s2 = speaker.make_new_speaker("harry@email")
s2.put()
d.add_speaker(s2.key)
self.assertEquals(2, len(d.get_speaker_list()))
self.assertTrue(d.is_speaker_listed(s.key))
self.assertTrue(d.is_speaker_listed(s2.key))
d.remove_speaker(s.key)
self.assertEquals(1, len(d.get_speaker_list()))
self.assertEquals("<EMAIL>", d.get_speaker_list()[0].get().email)
self.assertFalse(d.is_speaker_listed(s.key))
self.assertTrue(d.is_speaker_listed(s2.key))
def test_double_entry(self):
s = speaker.make_new_speaker("mail@email")
s.put()
speakerdir.SpeakerDir().add_speaker(s.key)
self.assertTrue(speakerdir.SpeakerDir().is_speaker_listed(s.key))
self.assertEquals(1, len(speakerdir.SpeakerDir().get_speaker_list()))
speakerdir.SpeakerDir().add_speaker(s.key)
self.assertEquals(1, len(speakerdir.SpeakerDir().get_speaker_list())) | speaker_lib/tests/testspeakerdir.py | 0.423339 | 0.532364
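# Aside: a hypothetical standalone runner for the suite above; it assumes the
# App Engine Python SDK is importable (e.g. added to sys.path by dev_appserver)
# so that google.appengine.ext.testbed resolves.
import unittest
from speaker_lib.tests.testspeakerdir import TestSpeakerDir

suite = unittest.TestLoader().loadTestsFromTestCase(TestSpeakerDir)
unittest.TextTestRunner(verbosity=2).run(suite)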
__author__ = '<EMAIL> (<NAME>)'
import flags
from generation import Generation
import task
def _DecreaseFlag(flags_dict, spec):
"""Decrease the value of the flag that has the specification spec.
If the flag that contains the spec is a boolean flag, it is eliminated.
Otherwise the flag is a numeric flag, its value will be reduced by one.
Args:
flags_dict: The dictionary containing the original flags whose neighbors are
to be explored.
spec: The spec in the flags_dict is to be changed.
Returns:
    A neighbor flags dictionary that differs from the original dictionary only
    in the given spec.
"""
# The specification must be held by one of the flags.
assert spec in flags_dict
# The results this method returns.
results = flags_dict.copy()
# This method searches for a pattern [start-end] in the spec. If the spec
# contains this pattern, it is a numeric flag. Otherwise it is a boolean flag.
# For example, -finline-limit=[1-1000] is a numeric flag and -falign-jumps is
# a boolean flag.
numeric_flag_match = flags.Search(spec)
if numeric_flag_match:
# numeric flag
val = results[spec].GetValue()
# If the value of the flag is the lower boundary of the specification, this
# flag will be turned off. Because it already contains the lowest value and
# can not be decreased any more.
if val == int(numeric_flag_match.group('start')):
      # Turn off the flag. A flag is turned off if it is not present in the
      # flags_dict.
del results[spec]
else:
results[spec] = flags.Flag(spec, val - 1)
else:
    # Turn off the flag. A flag is turned off if it is not present in the
    # flags_dict.
del results[spec]
return results
class IterativeEliminationGeneration(Generation):
"""The negative flag iterative elimination algorithm."""
def __init__(self, exe_set, parent_task):
"""Set up the base line parent task.
The parent task is the base line against which the new tasks are compared.
    The new tasks each differ from the base line by exactly one flag f: either
    flag f is turned off, or its value is lowered by 1.
    If a new task is better than the base line, a flag that causes degradation
    has been identified. In each iteration, the flag that gives the worst
    degradation is removed from the base line, or its value is lowered by 1.
Args:
exe_set: A set of tasks to be run. Each one only differs from the
parent_task by one flag.
parent_task: The base line task, against which the new tasks in exe_set
are compared.
"""
Generation.__init__(self, exe_set, None)
self._parent_task = parent_task
def IsImproved(self):
"""Whether any new task has improvement upon the parent task."""
parent = self._parent_task
# Whether there is any new task that has improvement over the parent base
# line task.
for curr in [curr for curr in self.Pool() if curr != parent]:
if curr.IsImproved(parent):
return True
return False
def Next(self, cache):
"""Find out the flag that gives the worst degradation.
    Finds the flag that gives the worst degradation, turns that flag off in the
    base line and uses the new base line for the next generation.
Args:
cache: A set of tasks that have been generated before.
Returns:
A set of new generations.
"""
parent_task = self._parent_task
# Find out the task that gives the worst degradation.
worst_task = parent_task
for curr in [curr for curr in self.Pool() if curr != parent_task]:
# The method IsImproved, which is supposed to be called before, ensures
# that there is at least a task that improves upon the parent_task.
if curr.IsImproved(worst_task):
worst_task = curr
assert worst_task != parent_task
# The flags_set of the worst task.
work_flags_set = worst_task.GetFlags().GetFlags()
results = set([])
# If the flags_set contains no flag, i.e., all the flags have been
# eliminated, the algorithm stops.
if not work_flags_set:
return []
    # Turn off the remaining flags one by one for the next generation.
for spec in work_flags_set:
flag_set = flags.FlagSet(_DecreaseFlag(work_flags_set, spec).values())
new_task = task.Task(flag_set)
if new_task not in cache:
results.add(new_task)
return [IterativeEliminationGeneration(results, worst_task)]
class IterativeEliminationFirstGeneration(IterativeEliminationGeneration):
"""The first iteration of the iterative elimination algorithm.
The first iteration also evaluates the base line task. The base line tasks in
the subsequent iterations have been evaluated. Therefore,
IterativeEliminationGeneration does not include the base line task in the
execution set.
"""
def IsImproved(self):
# Find out the base line task in the execution set.
parent = next(task for task in self.Pool() if task == self._parent_task)
self._parent_task = parent
return IterativeEliminationGeneration.IsImproved(self) | bestflags/iterative_elimination.py | 0.870941 | 0.467089
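# Aside: a self-contained toy of the neighbor step that _DecreaseFlag implements,
# with plain ints standing in for the project's Flag objects (the real code goes
# through flags.Flag and flags.Search); the specs and values are illustrative.
import re

def decrease_flag_toy(flags_dict, spec):
    # Numeric specs look like '-finline-limit=[1-1000]' and carry an int value;
    # boolean specs carry None. Decrementing a numeric flag already at its
    # lower bound, or touching a boolean flag, removes the flag entirely.
    result = dict(flags_dict)
    match = re.search(r'\[(?P<start>\d+)-(?P<end>\d+)\]', spec)
    if match and result[spec] > int(match.group('start')):
        result[spec] -= 1
    else:
        del result[spec]
    return result

base = {'-falign-jumps': None, '-finline-limit=[1-1000]': 1}
print(decrease_flag_toy(base, '-falign-jumps'))            # boolean: dropped
print(decrease_flag_toy(base, '-finline-limit=[1-1000]'))  # at lower bound: dropped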
import argparse
import json
from common.logger import get_logger
from common.application_exception import ApplicationException
logger = get_logger(__name__)
class CommandLineParser():
def get_options(self):
config = self.__validate_arguments()
return config
def __get_parser(self):
parser = argparse.ArgumentParser(description='Setup or configure the AWS services.')
parser.add_argument('--cluster-name', '-n', help='Identifies the name of the cluster being migrated', required=True)
parser.add_argument('--connection-string', '-c', help='Sets the connection string for the DocumentDB cluster.')
parser.add_argument('--event-writer', '-e', help='Sets the status of the event writer. Values: stop or start.')
parser.add_argument('--status', '-s', help='Displays the migration status and time gap details.', action='store_true')
parser.add_argument('--watch-status', '-w', help='Watch the migration status and time gap details in a loop.', action='store_true')
return parser
def __validate_arguments(self):
parser = self.__get_parser()
config = vars(parser.parse_args())
logger.info("Command line arguments given: " + json.dumps(config))
# Verify necessary components are supplied in command line arguments
command = []
if config["connection_string"]:
command.append("connection_string")
if config["status"]:
command.append("status")
if config["watch_status"]:
command.append("watch_status")
if not config["event_writer"] is None:
command.append("event_writer")
if len(command) == 0:
raise ApplicationException("Missing input argument for command. Specify --connection-string or --event-writer.")
if len(command) > 1:
raise ApplicationException("Please specify only one of the commands: --connection-string, --event_writer, --status or --watch-status arguments.")
config["command"] = command[0]
logger.info("Validated Command line arguments are: " + json.dumps(config))
if config["command"] == "event_writer":
if config["event_writer"] != "start" and config["event_writer"] != "stop":
raise ApplicationException("Given value for event-writer is not valid: {}. Valid values are stop or start".format(config["event_writer"]))
elif config["command"] == "connection_string":
if config["connection_string"] == "" or config["connection_string"] == None:
raise ApplicationException("Given value for connection-string is not valid: [{}].".format(config["connection_string"]))
return config | cosmos-db-migration-utility/src/configure/commandline_parser.py | 0.379493 | 0.074265 |
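Aside: the __validate_arguments method above enforces "exactly one command" by hand; argparse can express the same constraint natively. A minimal standalone sketch of that alternative (illustrative names only, not part of the source file):

import argparse

parser = argparse.ArgumentParser(description="Mutual-exclusion sketch")
parser.add_argument("--cluster-name", "-n", required=True)
command = parser.add_mutually_exclusive_group(required=True)
command.add_argument("--connection-string", "-c")
command.add_argument("--event-writer", "-e", choices=["start", "stop"])
command.add_argument("--status", "-s", action="store_true")
command.add_argument("--watch-status", "-w", action="store_true")

# argparse now rejects zero commands or more than one command for us, and
# choices=["start", "stop"] replaces the manual event-writer validation.
args = parser.parse_args(["-n", "demo", "--status"])
print(vars(args))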
import random
import json
import datetime
import time
import sqlite3
import logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.WARNING)
logger = logging.getLogger(__name__)
class Points:
def __init__(self,dbfile):
self._mydb = sqlite3.connect(dbfile,check_same_thread=False)
self._mycursor = self._mydb.cursor()
self._mycursor.execute('CREATE TABLE IF NOT EXISTS points(uid INTEGER, name TEXT,groupid INTEGER,balance INTEGER, PRIMARY KEY (uid,groupid))')
self._probfix = 0.03
self._probfloat = 0.03
self._probthreshold = 20
self._bonus = 5
return
def clearUser(self,uid,groupid):
clearsql = "DELETE FROM points WHERE uid = ? AND groupid = ?"
self._mycursor.execute(clearsql,(uid,groupid))
self._mydb.commit()
return
def clearGroup(self,groupid):
clearsql = "DELETE FROM points WHERE groupid = ?"
self._mycursor.execute(clearsql,(groupid,))
self._mydb.commit()
return
def getBalance(self,uid,groupid):
self._mycursor.execute("SELECT balance FROM points WHERE `uid` = {} and groupid = {}".format(uid,groupid))
res = self._mycursor.fetchone()
if res is None:
return 0
else:
return res[0]
'''
def getRecentChanges(self,uid):
self._mycursor.execute("SELECT *,unix_timestamp(ts) AS timestamp FROM `changelog` WHERE `uid` = {} ORDER BY height DESC LIMIT 10".format(uid))
changes=[]
currentts = time.time()
for each in self._mycursor.fetchall():
changes.append({"before":str(datetime.timedelta(seconds=int(currentts - each[6]))),"diff":each[2],"memo":each[4]})
return changes
'''
def getRank(self,groupid,rank):
sql = "SELECT * FROM `points` WHERE groupid = ? order by balance desc limit ?"
self._mycursor.execute(sql,(groupid,rank))
top = self._mycursor.fetchall()
return top[-1]
def getAbove(self,groupid,amount=10):
sql = "SELECT * FROM `points` WHERE groupid = ? AND balance >= ? order by balance desc"
self._mycursor.execute(sql,(groupid,amount))
toplist = self._mycursor.fetchall()
return toplist
def getBoard(self,groupid,top=10):
sql = "SELECT * FROM `points` WHERE groupid = ? order by balance desc limit ?"
self._mycursor.execute(sql,(groupid,top))
toplist = self._mycursor.fetchall()
return toplist
def changeBalance(self,uid,name,groupid,number):
logger.warning("%s mined one from %s",uid,groupid)
balance = self.getBalance(uid,groupid)
res = balance+number
if res < 0:
return
createsql = "INSERT OR IGNORE INTO points (uid,name,groupid,balance) VALUES (?,?,?,0)"
self._mycursor.execute(createsql,(uid,name,groupid))
self._mydb.commit()
updatesql = "UPDATE points SET balance = ? WHERE uid = ? AND groupid = ?"
self._mycursor.execute(updatesql,(res,uid,groupid))
self._mydb.commit()
def bonus(self,user,groupid):
bonus = int(1+self._bonus*random.random())
self.changeBalance(user.id,user.full_name,groupid,bonus)
return bonus
def mine(self,user,groupid):
balance = self.getBalance(user.id,groupid)
if balance == 0:
balance = 1
if random.random()<(self._probfix + self._probfloat*self._probthreshold/balance):
self.changeBalance(user.id,user.full_name,groupid,1)
return True
else:
return False | points.py | 0.313315 | 0.057998 |
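Aside: the success-probability curve behind Points.mine() is easy to sanity-check in isolation. A small sketch using the same default constants as __init__ (pure arithmetic, no database or bot framework required):

PROB_FIX, PROB_FLOAT, PROB_THRESHOLD = 0.03, 0.03, 20

def mine_probability(balance: int) -> float:
    # A zero balance is treated as 1, exactly as in Points.mine().
    balance = max(balance, 1)
    return PROB_FIX + PROB_FLOAT * PROB_THRESHOLD / balance

for b in (0, 1, 10, 20, 100):
    print(b, round(mine_probability(b), 4))
# The probability decays from 0.63 at balance 1 toward the 0.03 floor, so
# low-balance users succeed far more often than rich ones.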
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('accounts', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='accountuser',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='accountnote',
name='account',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notes', to='accounts.Account'),
),
migrations.AddField(
model_name='accountinvite',
name='account',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invites', to='accounts.Account'),
),
migrations.AddField(
model_name='account',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this account belongs to. An account will get all permissions granted to each of its groups.', to='accounts.Group', verbose_name='groups'),
),
migrations.AddField(
model_name='account',
name='owner',
field=models.ForeignKey(help_text='User who owns or administrates this account', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='account', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='account',
name='permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permission for this account', to='accounts.Permission', verbose_name='permissions'),
),
migrations.AddField(
model_name='account',
name='users',
field=models.ManyToManyField(blank=True, help_text='Members of the company represented by this account.', related_name='accounts', through='accounts.AccountUser', to=settings.AUTH_USER_MODEL, verbose_name='users'),
),
] | vapor_manager/accounts/migrations/0002_auto_20200319_1756.py | 0.532911 | 0.115811 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
import tensorflow as tf
tf.enable_eager_execution()
def get_type_id(tgt_len, tgt_idx, type_val):
tgt_idx_left_shift = tgt_idx[:-1]
type_val_right_shift = type_val[1:]
new_type_id_shift = tf.scatter_nd(
shape=[tgt_len],
indices=tgt_idx_left_shift[:, None],
updates=type_val_right_shift
)
new_type_id_shift = tf.concat([type_val[:1], new_type_id_shift], axis=0)
new_type_id_shift = tf.math.cumsum(new_type_id_shift, exclusive=True)[1:]
new_type_id = tf.scatter_nd(
shape=[tgt_len],
indices=tgt_idx[:, None],
updates=type_val
)
new_type_id = tf.math.cumsum(new_type_id, exclusive=True)
new_type_id = new_type_id_shift - new_type_id
return new_type_id
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
seq_len = 16
inputs = tf.range(1, seq_len + 1, 1, dtype=tf.int32)
type_id = tf.range(1, seq_len + 1, 1, dtype=tf.int32)
del_ratio = 0.1
add_ratio = 0.1
rep_ratio = 0.2
rep_label = -1
add_label = -2
del_label = -3
del_rand = tf.random.uniform(shape=[seq_len], minval=0, maxval=1)
del_mask = del_rand < del_ratio
non_del_mask = tf.logical_not(del_mask)
right_shift_del_mask = tf.concat(
[tf.constant(False, shape=[1]), del_mask[:-1]], axis=0)
non_add_mask = tf.logical_or(del_mask, right_shift_del_mask)
add_rand = tf.random.uniform(shape=[seq_len], minval=0, maxval=1)
add_num = tf.reduce_sum(tf.cast(add_rand < add_ratio, tf.int32))
add_uniform = tf.random.uniform(shape=[add_num, seq_len], minval=0, maxval=1)
add_uniform -= 1e5 * tf.cast(non_add_mask, tf.float32)
add_idx = tf.argmax(add_uniform, axis=1)
add_cnt = tf.reduce_sum(tf.one_hot(add_idx, seq_len, dtype=tf.int32), 0)
rep_rand = tf.random.uniform(shape=[seq_len], minval=0, maxval=1)
rep_mask = tf.logical_and(tf.equal(add_cnt, 0), tf.logical_not(non_add_mask))
rep_mask = tf.logical_and(
rep_rand < (rep_ratio / (1 - 2 * del_ratio - add_ratio)), rep_mask)
rep_input = tf.where(
rep_mask,
tf.constant(rep_label, shape=[seq_len]),
inputs)
tgt_len_encoder = tgt_len_decoder = seq_len
print("rep", tf.cast(rep_mask, tf.int32).numpy().tolist())
print("add", add_cnt.numpy().tolist())
print("del", tf.cast(del_mask, tf.int32).numpy().tolist())
ori_idx = tf.range(seq_len)
#### encoder input
shift_val = add_cnt - tf.cast(del_mask, tf.int32)
shift_val = tf.cumsum(shift_val)
shift_idx = ori_idx + shift_val
tgt_len = tgt_len_encoder
valid_tgt = shift_idx < tgt_len
# remove deleted token
tgt_idx = tf.boolean_mask(shift_idx, tf.logical_and(non_del_mask, valid_tgt))
tgt_val = tf.boolean_mask(rep_input, tf.logical_and(non_del_mask, valid_tgt))
type_val = tf.boolean_mask(type_id, tf.logical_and(non_del_mask, valid_tgt))
max_len = tf.math.reduce_max(tgt_idx) + 1
enc_type_id = get_type_id(tgt_len, tgt_idx, type_val)
enc_seq = tf.scatter_nd(
shape=[tgt_len],
indices=tf.range(0, max_len)[:, None],
updates=tf.zeros(shape=[max_len], dtype=tf.int32) + add_label
)
enc_seq = tf.tensor_scatter_nd_update(
enc_seq,
indices=tgt_idx[:, None],
updates=tgt_val)
print("encoder input")
print(enc_seq.numpy().tolist())
print(enc_type_id.numpy().tolist())
#### decoder
shift_val = tf.cumsum(add_cnt)
shift_idx = ori_idx + shift_val
tgt_len = tgt_len_decoder
valid_tgt = shift_idx < tgt_len
tgt_idx = tf.boolean_mask(shift_idx, valid_tgt)
tgt_val = tf.boolean_mask(inputs, valid_tgt)
type_val = tf.boolean_mask(type_id, valid_tgt)
max_len = tf.math.reduce_max(tgt_idx) + 1
pad_id = 100
eos_id = 101
add_id = 102
dec_type_id = get_type_id(tgt_len, tgt_idx, type_val)
dec_seq = tf.concat(
[tf.zeros(shape=[max_len], dtype=tf.int32) + add_id,
tf.zeros(shape=[tgt_len - max_len], dtype=tf.int32) + pad_id], 0)
dec_seq = tf.tensor_scatter_nd_update(
dec_seq,
indices=tgt_idx[:, None],
updates=tgt_val)
# decoder input
dec_inp = tf.concat([tf.constant(eos_id, shape=[1]), dec_seq[:-1]], 0)
# edit type label
dec_add_mask = tf.equal(dec_seq, add_id)
dec_rep_mask = tf.scatter_nd(
shape=[tgt_len],
indices=tgt_idx[:, None],
updates=tf.boolean_mask(rep_mask, valid_tgt)
)
dec_del_mask = tf.scatter_nd(
shape=[tgt_len],
indices=tgt_idx[:, None],
updates=tf.boolean_mask(del_mask, valid_tgt)
)
edit_label = tf.cast(dec_add_mask, tf.int32) * add_label
edit_label += tf.cast(dec_rep_mask, tf.int32) * rep_label
edit_label += tf.cast(dec_del_mask, tf.int32) * del_label
print("decoder")
print("inputs", dec_inp.numpy().tolist())
print("target", dec_seq.numpy().tolist())
print("labels", edit_label.numpy().tolist())
print("type_id", dec_type_id.numpy().tolist())
if __name__ == "__main__":
app.run(main) | pretrain/seq2seq_edit_tf.py | 0.5564 | 0.167593 |
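Aside: the scatter-plus-exclusive-cumsum trick inside get_type_id above is easier to inspect outside TensorFlow. A hedged NumPy mirror of the same computation, useful only for sanity-checking (not part of the source file):

import numpy as np

def get_type_id_np(tgt_len, tgt_idx, type_val):
    # Two scatter + exclusive-cumsum passes whose difference paints every
    # position up to and including each target index with its type id.
    shift = np.zeros(tgt_len, dtype=type_val.dtype)
    shift[tgt_idx[:-1]] = type_val[1:]
    shift = np.concatenate([type_val[:1], shift])
    shift = (np.cumsum(shift) - shift)[1:]  # exclusive cumsum, drop the head
    base = np.zeros(tgt_len, dtype=type_val.dtype)
    base[tgt_idx] = type_val
    base = np.cumsum(base) - base           # exclusive cumsum
    return shift - base

print(get_type_id_np(5, np.array([1, 3]), np.array([7, 9])))  # [7 7 9 9 0]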
from unittest import TestCase, main
from program_graphs import CFG
from program_graphs.cfg.edge_contraction import is_possible_to_contract, edge_contraction_all
import networkx as nx # type: ignore
class TestCFGEdgeContraction(TestCase):
def test_is_contraction_possible_linear(self) -> None:
cfg = CFG()
cfg.add_edges_from([
(1, 2), (2, 3)
])
self.assertTrue(is_possible_to_contract(cfg, (1, 2)))
self.assertTrue(is_possible_to_contract(cfg, (2, 3)))
def test_is_contraction_possible_simple_cycle(self) -> None:
cfg = CFG()
cfg.add_edges_from([
(1, 2), (2, 3), (3, 4), (3, 2)
])
self.assertFalse(is_possible_to_contract(cfg, (1, 2)))
self.assertTrue(is_possible_to_contract(cfg, (2, 3)))
self.assertFalse(is_possible_to_contract(cfg, (3, 4)))
self.assertFalse(is_possible_to_contract(cfg, (3, 2)))
def test_is_contraction_possible_two_nested_cycles(self) -> None:
cfg = CFG()
cfg.add_edges_from([
(1, 2), (2, 3), (3, 4), (4, 5),
(3, 3), (4, 2)
])
self.assertFalse(is_possible_to_contract(cfg, (3, 3)))
self.assertFalse(is_possible_to_contract(cfg, (4, 2)))
self.assertFalse(is_possible_to_contract(cfg, (1, 2)))
self.assertFalse(is_possible_to_contract(cfg, (2, 3)))
self.assertFalse(is_possible_to_contract(cfg, (3, 4)))
self.assertFalse(is_possible_to_contract(cfg, (4, 5)))
def test_is_contraction_possible_two_nested_cycles_2(self) -> None:
cfg = CFG()
cfg.add_edges_from([
(1, 2), (2, 3), (3, 4), (4, 5),
(3, 2), (4, 2)
])
self.assertTrue(is_possible_to_contract(cfg, (2, 3)))
def test_is_contraction_possible_another_cycle(self) -> None:
cfg = CFG()
cfg.add_edges_from([
(1, 2), (2, 3), (3, 4), (4, 2), (2, 5)
])
self.assertTrue(is_possible_to_contract(cfg, (3, 4)))
self.assertFalse(is_possible_to_contract(cfg, (2, 3)))
def test_is_contraction_possible_return_left(self) -> None:
cfg = CFG()
node_1 = cfg.add_node([1])
node_2 = cfg.add_node([2])
cfg.add_edges_from([(node_1, node_2)])
cfg.add_return_node(node_1)
self.assertFalse(is_possible_to_contract(cfg, (node_1, node_2)))
def test_is_contraction_possible_return_right(self) -> None:
cfg = CFG()
node_1 = cfg.add_node([1])
node_2 = cfg.add_node([2])
cfg.add_edges_from([(node_1, node_2)])
cfg.add_return_node(node_2)
self.assertTrue(is_possible_to_contract(cfg, (node_1, node_2)))
def test_is_contraction_possible_break_node_left(self) -> None:
cfg = CFG()
node_1 = cfg.add_node([1])
node_2 = cfg.add_node([2])
cfg.add_edges_from([(node_1, node_2)])
cfg.add_break_node(node_1)
self.assertFalse(is_possible_to_contract(cfg, (node_1, node_2)))
def test_is_contraction_possible_break_node_right(self) -> None:
cfg = CFG()
node_1 = cfg.add_node([1])
node_2 = cfg.add_node([2])
cfg.add_edges_from([(node_1, node_2)])
cfg.add_break_node(node_2)
self.assertTrue(is_possible_to_contract(cfg, (node_1, node_2)))
def test_is_contraction_possible_continue_node_left(self) -> None:
cfg = CFG()
node_1 = cfg.add_node([1])
node_2 = cfg.add_node([2])
cfg.add_edges_from([(node_1, node_2)])
cfg.add_continue_node(node_1)
self.assertFalse(is_possible_to_contract(cfg, (node_1, node_2)))
def test_is_contraction_possible_continue_node_right(self) -> None:
cfg = CFG()
node_1 = cfg.add_node([1])
node_2 = cfg.add_node([2])
cfg.add_edges_from([(node_1, node_2)])
cfg.add_continue_node(node_2)
self.assertTrue(is_possible_to_contract(cfg, (node_1, node_2)))
def test_edge_contraction_case_1(self) -> None:
cfg = CFG()
node_1 = cfg.add_node([1])
node_2 = cfg.add_node([2])
node_3 = cfg.add_node([3])
node_4 = cfg.add_node([4])
cfg.add_edges_from([(node_1, node_2), (node_2, node_3), (node_3, node_4)])
cfg = edge_contraction_all(cfg)
self.assertEqual(len(cfg.nodes()), 1)
def test_edge_contraction_case_cycle_of_length_one(self) -> None:
cfg = CFG()
node_1 = cfg.add_node([1])
node_2 = cfg.add_node([2])
node_3 = cfg.add_node([3])
node_4 = cfg.add_node([4])
cfg.add_edges_from([(node_1, node_2), (node_2, node_3), (node_3, node_4), (node_3, node_2)])
cfg = edge_contraction_all(cfg)
self.assertTrue(
nx.algorithms.is_isomorphic(cfg, nx.DiGraph([(1, 2), (2, 3), (2, 2)]))
)
def test_edge_contraction_case_cycle_of_length_two(self) -> None:
cfg = CFG()
node_1 = cfg.add_node([1])
node_2 = cfg.add_node([2])
node_3 = cfg.add_node([3])
node_4 = cfg.add_node([4])
node_5 = cfg.add_node([5])
cfg.add_edges_from([(node_1, node_2), (node_2, node_3), (node_3, node_4), (node_4, node_5), (node_4, node_2)])
cfg = edge_contraction_all(cfg)
self.assertTrue(
nx.algorithms.is_isomorphic(cfg, nx.DiGraph([(1, 2), (2, 3), (2, 2)]))
)
def test_edge_contraction_case_2(self) -> None:
cfg = CFG()
node_1 = cfg.add_node([])
node_2 = cfg.add_node([])
node_3 = cfg.add_node([])
node_4 = cfg.add_node([])
node_5 = cfg.add_node([])
node_6 = cfg.add_node([])
cfg.add_edges_from([
(node_1, node_2),
(node_2, node_3),
(node_3, node_4),
(node_4, node_5),
(node_3, node_6),
(node_5, node_6)
])
cfg = edge_contraction_all(cfg)
self.assertTrue(
nx.algorithms.is_isomorphic(cfg, nx.DiGraph([(1, 3), (1, 2), (2, 3)]))
)
def test_edge_contraction_name_assignment_left(self) -> None:
cfg = CFG()
node_1 = cfg.add_node([], name='A')
node_2 = cfg.add_node([])
cfg.add_edges_from([(node_1, node_2)])
cfg = edge_contraction_all(cfg)
self.assertIsNotNone(cfg.find_node_by_name('A'))
def test_edge_contraction_name_assignment_right(self) -> None:
cfg = CFG()
node_1 = cfg.add_node([])
node_2 = cfg.add_node([], name='B')
cfg.add_edges_from([(node_1, node_2)])
cfg = edge_contraction_all(cfg)
self.assertIsNotNone(cfg.find_node_by_name('B'))
def test_edge_contraction_name_assignment_both(self) -> None:
cfg = CFG()
node_1 = cfg.add_node([], name='A')
node_2 = cfg.add_node([], name='B')
cfg.add_edges_from([(node_1, node_2)])
cfg = edge_contraction_all(cfg)
self.assertIsNotNone(cfg.find_node_by_name('B'))
def test_blocks_merging(self) -> None:
cfg = CFG()
node_1 = cfg.add_node([1])
node_2 = cfg.add_node([2])
cfg.add_edges_from([
(node_1, node_2)
])
cfg = edge_contraction_all(cfg)
self.assertIn([1, 2], cfg.node_id_2_block)
if __name__ == '__main__':
main() | program_graphs/cfg/tests/test_edge_contraction.py | 0.604749 | 0.627866 |
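Aside: the collapse that test_edge_contraction_case_1 asserts can be reproduced with plain networkx for intuition. A sketch of the classic basic-block merge using nx.contracted_edge (this is not the program_graphs implementation under test):

import networkx as nx

g = nx.DiGraph([(1, 2), (2, 3), (3, 4)])
merged = True
while merged:
    merged = False
    for u, v in list(g.edges()):
        # Contract only when u's sole successor is v and v's sole
        # predecessor is u -- the linear-chain condition the tests rely on.
        if u != v and g.out_degree(u) == 1 and g.in_degree(v) == 1:
            g = nx.contracted_edge(g, (u, v), self_loops=False)
            merged = True
            break
print(list(g.nodes()))  # [1] -- the whole chain collapses into one node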
def addFilename(outFile, filename, element):
outFile.write('/**\n')
outFile.write(' * @file: {0}\n'.format(filename))
outFile.write(' * @brief: Implementation of the {0} class\n'.format(element))
outFile.write(' * @author: <NAME>\n *\n')
def addLicence(outFile):
outFile.write(' * <!--------------------------------------------------------------------------\n')
outFile.write(' * This file is part of libSEDML. Please visit http://sed-ml.org for more\n')
outFile.write(' * information about SED-ML. The latest version of libSEDML can be found on\n')
outFile.write(' * github: https://github.com/fbergmann/libSEDML/\n')
outFile.write(' *\n')
outFile.write(' * Copyright (c) 2013-2016, <NAME>\n')
outFile.write(' * All rights reserved.\n')
outFile.write(' *\n')
outFile.write(' * Redistribution and use in source and binary forms, with or without\n')
outFile.write(' * modification, are permitted provided that the following conditions are met:\n')
outFile.write(' *\n')
outFile.write(' * 1. Redistributions of source code must retain the above copyright notice, this\n')
outFile.write(' * list of conditions and the following disclaimer.\n')
outFile.write(' * 2. Redistributions in binary form must reproduce the above copyright notice,\n')
outFile.write(' * this list of conditions and the following disclaimer in the documentation\n')
outFile.write(' * and/or other materials provided with the distribution.\n')
outFile.write(' *\n')
outFile.write(' * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND\n')
outFile.write(' * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n')
outFile.write(' * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n')
outFile.write(' * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n')
outFile.write(' * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n')
outFile.write(' * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n')
outFile.write(' * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n')
outFile.write(' * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n')
outFile.write(' * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n')
outFile.write(' * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n')
outFile.write(' * ------------------------------------------------------------------------ -->\n')
outFile.write(' */\n') | dev/fileHeaders.py | 0.312895 | 0.031351 |
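Aside: the two writers above are designed to be called back-to-back: addFilename opens the Doxygen comment block and addLicence closes it. A quick usage sketch against an in-memory buffer (the filename and class name are hypothetical):

import io

buf = io.StringIO()
addFilename(buf, "SedDocument.cpp", "SedDocument")  # hypothetical target file
addLicence(buf)
header = buf.getvalue()
assert header.startswith("/**") and header.rstrip().endswith("*/")
print(header.splitlines()[1])  # " * @file: SedDocument.cpp"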
import os
import random
import re
import requests
import shutil
import tempfile
SAVE_DIR = tempfile.gettempdir()
PAGE_WITH_IMAGES_URLS_REGEX = re.compile(
r"<div class=\"thumb-container-big \" id=\"thumb_([0-9]+)\">"
)
IMAGE_URL_REGEX = re.compile(
r"<img class=\"main-content\" width=\"[0-9]+\" height=\"[0-9]+\" src=\"([^\"]+)\""
)
IMAGE_LIST_URL_TEMPLATE = "https://wall.alphacoders.com/by_category.php?id=3&name=Anime+Wallpapers&page={page_num}"
PAGE_WITH_IMAGE_URL_TEMPLATE = "https://wall.alphacoders.com/big.php?i={image_id}"
LAST_PAGE_NUMBER = 500
def save_image(url: str):
image_format = re.search(r"\.([^\.]+)$", url).group(1)
response = requests.get(url, stream=True)
if response.status_code != 200:
raise ConnectionError(f"Faild to save image, code={response.status_code}")
file_path = os.path.join(SAVE_DIR, f"animebg.{image_format}")
with open(file_path, "wb") as file:
response.raw.decode_content = True
shutil.copyfileobj(response.raw, file)
return file_path
def get_random_image_list_url():
page_num = random.randrange(1, LAST_PAGE_NUMBER)
return IMAGE_LIST_URL_TEMPLATE.format(page_num=page_num)
def get_page_with_image_url(image_list_url: str):
response = requests.get(image_list_url)
html = response.text
if response.status_code != 200:
raise ConnectionError(
f"Faild to get page with image url, code={response.status_code}"
)
search_results = PAGE_WITH_IMAGES_URLS_REGEX.findall(html)
image_id = random.choice(search_results)
return PAGE_WITH_IMAGE_URL_TEMPLATE.format(image_id=image_id)
def get_image_url(image_in_site_url: str):
response = requests.get(image_in_site_url)
html = response.text
if response.status_code != 200:
raise ConnectionError(f"Faild to get image url, code={response.status_code}")
return IMAGE_URL_REGEX.search(html).group(1)
if __name__ == "__main__":
random_list_url = get_random_image_list_url()
image_in_site_url = get_page_with_image_url(random_list_url)
image_url = get_image_url(image_in_site_url)
file_path = save_image(image_url)
print(file_path) | user/.scripts/anime_bg/anime_bg.py | 0.345547 | 0.082697 |
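Aside: related to the decode_content fix above, this is the canonical requests streaming-download pattern as a standalone sketch (the URL below is a placeholder):

import shutil
import requests

def download(url: str, dest: str) -> None:
    # Stream the body to disk without holding it all in memory.
    with requests.get(url, stream=True, timeout=30) as response:
        response.raise_for_status()
        # Ask urllib3 to transparently decompress gzip/deflate bodies;
        # note the flag lives on the response, not on the local file.
        response.raw.decode_content = True
        with open(dest, "wb") as fh:
            shutil.copyfileobj(response.raw, fh)

# download("https://example.com/wallpaper.jpg", "/tmp/wallpaper.jpg")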
from astropy import cosmology as cosmo
import json
import numpy as np
from typing import Optional, Union
from os import path
from autoconf import conf
import autofit as af
import autoarray as aa
from autogalaxy import exc
from autogalaxy.hyper.hyper_data import HyperImageSky
from autogalaxy.hyper.hyper_data import HyperBackgroundNoise
from autogalaxy.galaxy.galaxy import Galaxy
from autogalaxy.plane.plane import Plane
from autogalaxy.analysis.result import ResultDataset
class Analysis(af.Analysis):
def __init__(self, cosmology=cosmo.Planck15):
"""
Analysis classes are used by PyAutoFit to fit a model to a dataset via a non-linear search.
This is the abstract Analysis class for all model-fits which fit galaxies (or objects containing galaxies, like a
plane); it does not perform a model-fit by itself and is therefore only inherited from.
This class stores the Cosmology used for the analysis and hyper datasets used for certain model classes.
Parameters
----------
cosmology
The AstroPy Cosmology assumed for this analysis.
"""
self.cosmology = cosmology
def plane_for_instance(self, instance: af.ModelInstance) -> Plane:
"""
Create a `Plane` from the galaxies contained in a model instance.
Parameters
----------
instance
An instance of the model that is fitted to the data by this analysis (whose parameters may have been set
via a non-linear search).
Returns
-------
An instance of the Plane class that is then used to fit the dataset.
"""
return Plane(galaxies=instance.galaxies)
class AnalysisDataset(Analysis):
def __init__(
self,
dataset: Union[aa.Imaging, aa.Interferometer],
hyper_dataset_result: ResultDataset = None,
cosmology=cosmo.Planck15,
settings_pixelization: aa.SettingsPixelization = None,
settings_inversion: aa.SettingsInversion = None,
):
"""
Abstract Analysis class for all model-fits which fit galaxies (or objects containing galaxies like a plane)
to a dataset, like imaging or interferometer data.
This class stores the settings used to perform the model-fit for certain components of the model (e.g. a
pixelization or inversion), the Cosmology used for the analysis and hyper datasets used for certain model
classes.
Parameters
----------
dataset
The dataset that the model is fitted to.
hyper_dataset_result
The hyper-model image and hyper galaxies images of a previous result in a model-fitting pipeline, which are
used by certain classes for adapting the analysis to the properties of the dataset.
cosmology
The Cosmology assumed for this analysis.
settings_pixelization
Settings controlling how a pixelization is fitted during the model-fit, for example if a border is used
when creating the pixelization.
settings_inversion
Settings controlling how an inversion is fitted during the model-fit, for example which linear algebra
formalism is used.
"""
super().__init__(cosmology=cosmology)
self.dataset = dataset
self.hyper_dataset_result = hyper_dataset_result
if self.hyper_dataset_result is not None:
if hyper_dataset_result.search is not None:
hyper_dataset_result.search.paths = None
self.set_hyper_dataset(result=self.hyper_dataset_result)
else:
self.hyper_galaxy_image_path_dict = None
self.hyper_model_image = None
self.settings_pixelization = settings_pixelization or aa.SettingsPixelization()
self.settings_inversion = settings_inversion or aa.SettingsInversion()
self.preloads = aa.Preloads()
def set_hyper_dataset(self, result: ResultDataset) -> None:
"""
Using the result of a previous model-fit, set the hyper-dataset for this analysis. This is used to adapt
aspects of the model (e.g. the pixelization, regularization scheme) to the properties of the dataset being
fitted.
This passes the hyper model image and hyper galaxy images of the previous fit. These represent where different
galaxies in the dataset are located and thus allow the fit to adapt different aspects of the model to different
galaxies in the data.
Parameters
----------
result
The result of a previous model-fit which contains the model image and model galaxy images of a fit to
the dataset, which set up the hyper dataset. These are used by certain classes for adapting the analysis
to the properties of the dataset.
"""
hyper_galaxy_image_path_dict = result.hyper_galaxy_image_path_dict
hyper_model_image = result.hyper_model_image
self.hyper_galaxy_image_path_dict = hyper_galaxy_image_path_dict
self.hyper_model_image = hyper_model_image
def hyper_image_sky_for_instance(
self, instance: af.ModelInstance
) -> Optional[HyperImageSky]:
"""
If the model instance contains a `HyperImageSky` attribute, which adds a free parameter to the model that
scales the background sky, return this attribute. Otherwise, None is returned.
Parameters
----------
instance
An instance of the model that is being fitted to the data by this analysis (whose parameters have been set
via a non-linear search).
Returns
-------
An instance of the hyper image sky class that scales the sky background.
"""
if hasattr(instance, "hyper_image_sky"):
return instance.hyper_image_sky
def hyper_background_noise_for_instance(
self, instance: af.ModelInstance
) -> Optional[HyperBackgroundNoise]:
"""
If the model instance contains a `HyperBackgroundNoise` attribute, which adds a free parameter to the model that
scales the background noise, return this attribute. Otherwise, None is returned.
Parameters
----------
instance
An instance of the model that is being fitted to the data by this analysis (whose parameters have been set
via a non-linear search).
Returns
-------
An instance of the hyper background noise class that scales the background noise.
"""
if hasattr(instance, "hyper_background_noise"):
return instance.hyper_background_noise
def associate_hyper_images(self, instance: af.ModelInstance) -> af.ModelInstance:
"""
Using the model image and galaxy images that were set up as the hyper dataset, associate the galaxy images
of that result with the galaxies in this model fit.
Association is performed based on galaxy names, whereby if the name of a galaxy in this search matches the
full-path name of galaxies in the hyper dataset, the galaxy image is passed.
If the galaxy collection has a different name then an association is not made.
For example, `galaxies.lens` will match with:
`galaxies.lens`
but not with:
`galaxies.source`
Parameters
----------
instance
An instance of the model that is being fitted to the data by this analysis (whose parameters have been set
via a non-linear search), which has 0 or more galaxies in its tree.
Returns
-------
instance
The input instance with images associated with galaxies where possible.
"""
if self.hyper_galaxy_image_path_dict is not None:
for galaxy_path, galaxy in instance.path_instance_tuples_for_class(Galaxy):
if galaxy_path in self.hyper_galaxy_image_path_dict:
galaxy.hyper_model_image = self.hyper_model_image
galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[
galaxy_path
]
return instance
def save_attributes_for_aggregator(self, paths: af.DirectoryPaths):
"""
Before the model-fit via the non-linear search begins, this routine saves attributes of the `Analysis` object
to the `pickles` folder such that they can be loaded after the analysis using PyAutoFit's database and aggregator
tools.
For this analysis the following are output:
- The dataset's data.
- The dataset's noise-map.
- The settings associated with the dataset.
- The settings associated with the inversion.
- The settings associated with the pixelization.
- The Cosmology.
- The hyper dataset's model image and galaxy images, if used.
It is common for these attributes to be loaded by many of the template aggregator functions given in the
`aggregator` modules. For example, when using the database tools to reperform a fit, this will by default
load the dataset, settings and other attributes necessary to perform a fit using the attributes output by
this function.
Parameters
----------
paths
The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization,
and the pickled objects used by the aggregator output by this function.
"""
paths.save_object("data", self.dataset.data)
paths.save_object("noise_map", self.dataset.noise_map)
paths.save_object("settings_dataset", self.dataset.settings)
paths.save_object("settings_inversion", self.settings_inversion)
paths.save_object("settings_pixelization", self.settings_pixelization)
paths.save_object("cosmology", self.cosmology)
if self.hyper_model_image is not None:
paths.save_object("hyper_model_image", self.hyper_model_image)
if self.hyper_galaxy_image_path_dict is not None:
paths.save_object(
"hyper_galaxy_image_path_dict", self.hyper_galaxy_image_path_dict
)
def output_or_check_figure_of_merit_sanity(
self, paths: af.DirectoryPaths, result: af.Result
):
"""
Changes to the PyAutoGalaxy source code may inadvertently change the numerics of how a log likelihood is
computed. Equally, one may set off a model-fit that resumes from previous results, but change the settings of
the pixelization or inversion in a way that changes the log likelihood function.
This function performs an optional sanity check, which raises an exception if the log likelihood calculation
changes, to ensure a model-fit is not resumed with a different likelihood calculation to the previous run.
If the model-fit has not been performed before (e.g. it is not a resume) this function outputs
the `figure_of_merit` (e.g. the log likelihood) of the maximum log likelihood model at the end of the model-fit.
If the model-fit is a resume, it loads this `figure_of_merit` and compares it against a new value computed for
the resumed run (again using the maximum log likelihood model inferred). If the two likelihoods do not agree
and therefore the log likelihood function has changed, an exception is raised and the code execution terminated.
Parameters
----------
paths
The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization,
and pickled objects used by the database and aggregator.
result
The result containing the maximum log likelihood fit of the model.
"""
figure_of_merit = result.max_log_likelihood_fit.figure_of_merit
figure_of_merit_sanity_file = path.join(
paths.output_path, "figure_of_merit_sanity.json"
)
if not path.exists(figure_of_merit_sanity_file):
with open(figure_of_merit_sanity_file, "w+") as f:
json.dump(figure_of_merit, f)
else:
with open(figure_of_merit_sanity_file) as json_file:
figure_of_merit_sanity = json.load(json_file)
if conf.instance["general"]["test"]["check_figure_of_merit_sanity"]:
if not np.isclose(figure_of_merit, figure_of_merit_sanity):
raise exc.AnalysisException(
"Figure of merit sanity check failed. "
""
"This means that the existing results of a model fit used a different "
"likelihood function compared to the one implemented now.\n\n"
f"Old Figure of Merit = {figure_of_merit_sanity}\n"
f"New Figure of Merit = {figure_of_merit}"
) | autogalaxy/analysis/analysis.py | from astropy import cosmology as cosmo
import json
import numpy as np
from typing import Optional, Union
from os import path
from autoconf import conf
import autofit as af
import autoarray as aa
from autogalaxy import exc
from autogalaxy.hyper.hyper_data import HyperImageSky
from autogalaxy.hyper.hyper_data import HyperBackgroundNoise
from autogalaxy.galaxy.galaxy import Galaxy
from autogalaxy.plane.plane import Plane
from autogalaxy.analysis.result import ResultDataset
class Analysis(af.Analysis):
def __init__(self, cosmology=cosmo.Planck15):
"""
Analysis classes are used by PyAutoFit to fit a model to a dataset via a non-linear search.
This is the abstract Analysis class for all model-fits which fit galaxies (or objects containing galaxies, like a
plane); it does not perform a model-fit by itself and is therefore only inherited from.
This class stores the Cosmology used for the analysis and hyper datasets used for certain model classes.
Parameters
----------
cosmology
The AstroPy Cosmology assumed for this analysis.
"""
self.cosmology = cosmology
def plane_for_instance(self, instance: af.ModelInstance) -> Plane:
"""
Create a `Plane` from the galaxies contained in a model instance.
Parameters
----------
instance
An instance of the model that is fitted to the data by this analysis (whose parameters may have been set
via a non-linear search).
Returns
-------
An instance of the Plane class that is then used to fit the dataset.
"""
return Plane(galaxies=instance.galaxies)
class AnalysisDataset(Analysis):
def __init__(
self,
dataset: Union[aa.Imaging, aa.Interferometer],
hyper_dataset_result: ResultDataset = None,
cosmology=cosmo.Planck15,
settings_pixelization: aa.SettingsPixelization = None,
settings_inversion: aa.SettingsInversion = None,
):
"""
Abstract Analysis class for all model-fits which fit galaxies (or objects containing galaxies like a plane)
to a dataset, like imaging or interferometer data.
This class stores the settings used to perform the model-fit for certain components of the model (e.g. a
pixelization or inversion), the Cosmology used for the analysis and hyper datasets used for certain model
classes.
Parameters
----------
dataset
The dataset that the model is fitted to.
hyper_dataset_result
The hyper-model image and hyper galaxies images of a previous result in a model-fitting pipeline, which are
used by certain classes for adapting the analysis to the properties of the dataset.
cosmology
The Cosmology assumed for this analysis.
settings_pixelization
Settings controlling how a pixelization is fitted during the model-fit, for example if a border is used
when creating the pixelization.
settings_inversion
Settings controlling how an inversion is fitted during the model-fit, for example which linear algebra
formalism is used.
"""
super().__init__(cosmology=cosmology)
self.dataset = dataset
self.hyper_dataset_result = hyper_dataset_result
if self.hyper_dataset_result is not None:
if hyper_dataset_result.search is not None:
hyper_dataset_result.search.paths = None
self.set_hyper_dataset(result=self.hyper_dataset_result)
else:
self.hyper_galaxy_image_path_dict = None
self.hyper_model_image = None
self.settings_pixelization = settings_pixelization or aa.SettingsPixelization()
self.settings_inversion = settings_inversion or aa.SettingsInversion()
self.preloads = aa.Preloads()
def set_hyper_dataset(self, result: ResultDataset) -> None:
"""
Using the result of a previous model-fit, set the hyper-dataset for this analysis. This is used to adapt
aspects of the model (e.g. the pixelization, regularization scheme) to the properties of the dataset being
fitted.
This passes the hyper model image and hyper galaxy images of the previous fit. These represent where different
galaxies in the dataset are located and thus allow the fit to adapt different aspects of the model to different
galaxies in the data.
Parameters
----------
result
The result of a previous model-fit which contains the model image and model galaxy images of a fit to
the dataset, which set up the hyper dataset. These are used by certain classes for adapting the analysis
to the properties of the dataset.
"""
hyper_galaxy_image_path_dict = result.hyper_galaxy_image_path_dict
hyper_model_image = result.hyper_model_image
self.hyper_galaxy_image_path_dict = hyper_galaxy_image_path_dict
self.hyper_model_image = hyper_model_image
def hyper_image_sky_for_instance(
self, instance: af.ModelInstance
) -> Optional[HyperImageSky]:
"""
If the model instance contains a `HyperImageSky` attribute, which adds a free parameter to the model that
        scales the background sky, return this attribute. Otherwise `None` is returned.
Parameters
----------
instance
An instance of the model that is being fitted to the data by this analysis (whose parameters have been set
via a non-linear search).
Returns
-------
An instance of the hyper image sky class that scales the sky background.
"""
if hasattr(instance, "hyper_image_sky"):
return instance.hyper_image_sky
def hyper_background_noise_for_instance(
self, instance: af.ModelInstance
) -> Optional[HyperBackgroundNoise]:
"""
If the model instance contains a `HyperBackgroundNoise` attribute, which adds a free parameter to the model that
        scales the background noise, return this attribute. Otherwise `None` is returned.
Parameters
----------
instance
An instance of the model that is being fitted to the data by this analysis (whose parameters have been set
via a non-linear search).
Returns
-------
An instance of the hyper background noise class that scales the background noise.
"""
if hasattr(instance, "hyper_background_noise"):
return instance.hyper_background_noise
def associate_hyper_images(self, instance: af.ModelInstance) -> af.ModelInstance:
"""
Using the model image and galaxy images that were set up as the hyper dataset, associate the galaxy images
of that result with the galaxies in this model fit.
        Association is performed based on galaxy names: if the name of a galaxy in this search matches the
        full-path name of a galaxy in the hyper dataset, that galaxy's image is passed.
        If the galaxy collection has a different name, no association is made.
For example, `galaxies.lens` will match with:
`galaxies.lens`
but not with:
`galaxies.source`
Parameters
----------
instance
An instance of the model that is being fitted to the data by this analysis (whose parameters have been set
via a non-linear search), which has 0 or more galaxies in its tree.
Returns
-------
instance
The input instance with images associated with galaxies where possible.
"""
if self.hyper_galaxy_image_path_dict is not None:
for galaxy_path, galaxy in instance.path_instance_tuples_for_class(Galaxy):
if galaxy_path in self.hyper_galaxy_image_path_dict:
galaxy.hyper_model_image = self.hyper_model_image
galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[
galaxy_path
]
return instance
def save_attributes_for_aggregator(self, paths: af.DirectoryPaths):
"""
Before the model-fit via the non-linear search begins, this routine saves attributes of the `Analysis` object
        to the `pickles` folder such that they can be loaded after the analysis using PyAutoFit's database and aggregator
tools.
For this analysis the following are output:
- The dataset's data.
- The dataset's noise-map.
- The settings associated with the dataset.
- The settings associated with the inversion.
- The settings associated with the pixelization.
- The Cosmology.
- The hyper dataset's model image and galaxy images, if used.
It is common for these attributes to be loaded by many of the template aggregator functions given in the
        `aggregator` modules. For example, when using the database tools to re-perform a fit, this will by default
load the dataset, settings and other attributes necessary to perform a fit using the attributes output by
this function.
Parameters
----------
paths
The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization,
and the pickled objects used by the aggregator output by this function.
"""
paths.save_object("data", self.dataset.data)
paths.save_object("noise_map", self.dataset.noise_map)
paths.save_object("settings_dataset", self.dataset.settings)
paths.save_object("settings_inversion", self.settings_inversion)
paths.save_object("settings_pixelization", self.settings_pixelization)
paths.save_object("cosmology", self.cosmology)
if self.hyper_model_image is not None:
paths.save_object("hyper_model_image", self.hyper_model_image)
if self.hyper_galaxy_image_path_dict is not None:
paths.save_object(
"hyper_galaxy_image_path_dict", self.hyper_galaxy_image_path_dict
)
def output_or_check_figure_of_merit_sanity(
self, paths: af.DirectoryPaths, result: af.Result
):
"""
        Changes to the PyAutoGalaxy source code may inadvertently change the numerics of how a log likelihood is
computed. Equally, one may set off a model-fit that resumes from previous results, but change the settings of
the pixelization or inversion in a way that changes the log likelihood function.
This function performs an optional sanity check, which raises an exception if the log likelihood calculation
changes, to ensure a model-fit is not resumed with a different likelihood calculation to the previous run.
If the model-fit has not been performed before (e.g. it is not a resume) this function outputs
the `figure_of_merit` (e.g. the log likelihood) of the maximum log likelihood model at the end of the model-fit.
If the model-fit is a resume, it loads this `figure_of_merit` and compares it against a new value computed for
the resumed run (again using the maximum log likelihood model inferred). If the two likelihoods do not agree
        and therefore the log likelihood function has changed, an exception is raised and code execution is terminated.
Parameters
----------
paths
The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization,
and pickled objects used by the database and aggregator.
result
The result containing the maximum log likelihood fit of the model.
"""
figure_of_merit = result.max_log_likelihood_fit.figure_of_merit
figure_of_merit_sanity_file = path.join(
paths.output_path, "figure_of_merit_sanity.json"
)
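        # First run: store the figure of merit to disk. Resumed run: load the stored
        # value and compare it against the newly computed one.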
if not path.exists(figure_of_merit_sanity_file):
with open(figure_of_merit_sanity_file, "w+") as f:
json.dump(figure_of_merit, f)
else:
with open(figure_of_merit_sanity_file) as json_file:
figure_of_merit_sanity = json.load(json_file)
if conf.instance["general"]["test"]["check_figure_of_merit_sanity"]:
if not np.isclose(figure_of_merit, figure_of_merit_sanity):
raise exc.AnalysisException(
"Figure of merit sanity check failed. "
""
"This means that the existing results of a model fit used a different "
"likelihood function compared to the one implemented now.\n\n"
f"Old Figure of Merit = {figure_of_merit_sanity}\n"
f"New Figure of Merit = {figure_of_merit}"
) | 0.948466 | 0.476701 |
from asn1crypto.cms import ContentInfo
from asn1crypto.crl import CertificateList
import rpki.roa
import rpki.manifest
from rpki.certificate import RPKICertificate
import os
import sys
import socket
import json
from datetime import datetime
ADDRESS_FAMILY_IPV4 = b'\x00\x01'
ADDRESS_FAMILY_IPV6 = b'\x00\x02'
# Turn a tuple of bits into a byte string. The number of bits needs to be a
# multiple of 8.
def bits_to_bytes(bits):
if len(bits) % 8 != 0:
raise ValueError("Number of bits not a multiple of 8")
out = []
for i in range(0, len(bits) >> 3):
v = 0
for j in range(0, 8):
            # asn1crypto's BitString.native yields bits most-significant first,
            # so the first bit of each group of eight is the byte's high bit.
            v |= bits[i*8+j] << (7 - j)
out.append(v)
return bytes(out)
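# Example (MSB-first ordering): bits_to_bytes((1, 1, 0, 0, 0, 0, 0, 0)) == b'\xc0'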
# Format bits as an IPv4 prefix in CIDR notation
def ipv4_prefix_to_string(bits):
if len(bits) > 32:
raise ValueError("Too many bits for IPv4 prefix")
# Extend bits to full IPv4 length
prefix = bits + tuple(0 for _ in range(32 - len(bits)))
b = bits_to_bytes(prefix)
str_prefix = socket.inet_ntop(socket.AF_INET, b) + "/" + str(len(bits))
return str_prefix
# Format bits as an IPv6 prefix in CIDR notation
def ipv6_prefix_to_string(bits):
if len(bits) > 128:
raise ValueError("Too many bits for IPv6 prefix")
# Extend bits to full IPv6 length
prefix = bits + tuple(0 for _ in range(128 - len(bits)))
b = bits_to_bytes(prefix)
str_prefix = socket.inet_ntop(socket.AF_INET6, b) + "/" + str(len(bits))
return str_prefix
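# Example: the 16 bits of 0x2001 format as "2001::/16"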
# Rewrite ipAddrBlocks in native format to readable prefixes
def rewrite_ipAddrBlocks(ipAddrBlocks):
for ipAddrBlock in ipAddrBlocks:
if ipAddrBlock['addressFamily'] == ADDRESS_FAMILY_IPV4:
ipAddrBlock['addressFamily'] = 'IPv4'
for k in range(0, len(ipAddrBlock['addresses'])):
# Rewrite IP prefix from bits to readable string
ipAddrBlock['addresses'][k]['address'] = ipv4_prefix_to_string(ipAddrBlock['addresses'][k]['address'])
# TODO Check max_length is consistent with prefix length?
elif ipAddrBlock['addressFamily'] == ADDRESS_FAMILY_IPV6:
ipAddrBlock['addressFamily'] = 'IPv6'
for k in range(0, len(ipAddrBlock['addresses'])):
# Rewrite IP prefix from bits to readable string
ipAddrBlock['addresses'][k]['address'] = ipv6_prefix_to_string(ipAddrBlock['addresses'][k]['address'])
# TODO Check max_length is consistent with prefix length?
else:
raise ValueError("Invalid addressFamily")
# Return version of object that can be converted to JSON.
# Byte strings are converted to hex, datetime to isoformat, sets to lists.
def jsonize_object(obj):
    if isinstance(obj, dict):
        return {k: jsonize_object(v) for k, v in obj.items()}
    elif isinstance(obj, (list, set)):
        return list(map(jsonize_object, obj))
    elif isinstance(obj, bytes):
        return obj.hex()
    elif isinstance(obj, datetime):
        return obj.isoformat()
else:
return obj
def process_roa(roa):
# Rewrite the IP addresses in the ipAddrBlocks to readable prefixes
rewrite_ipAddrBlocks(roa['ipAddrBlocks'])
def process_manifest(manifest):
# Rewrite hashes to hex/bytes
for fileHash in manifest['fileList']:
fileHash['hash'] = bits_to_bytes(fileHash['hash']).hex()
def process_certificate(certificate):
# Rewrite ipAddressChoice
for ext in certificate['tbs_certificate']['extensions']:
if ext['extn_id'] == 'id-pe-ipAddrBlocks':
for ipAddrFamily in ext['extn_value']:
if ipAddrFamily['addressFamily'] == ADDRESS_FAMILY_IPV4:
ipAddrFamily['addressFamily'] = 'IPv4'
if ipAddrFamily['ipAddressChoice']:
for k in range(0, len(ipAddrFamily['ipAddressChoice'])):
# Rewrite IP prefix from bits to readable string
ipAddrFamily['ipAddressChoice'][k] = ipv4_prefix_to_string(ipAddrFamily['ipAddressChoice'][k])
elif ipAddrFamily['addressFamily'] == ADDRESS_FAMILY_IPV6:
ipAddrFamily['addressFamily'] = 'IPv6'
if ipAddrFamily['ipAddressChoice']:
for k in range(0, len(ipAddrFamily['ipAddressChoice'])):
# Rewrite IP prefix from bits to readable string
ipAddrFamily['ipAddressChoice'][k] = ipv6_prefix_to_string(ipAddrFamily['ipAddressChoice'][k])
def main():
if len(sys.argv) < 2:
sys.exit("Not enough arguments")
path = sys.argv[1]
# TODO Add flag to override detection based on filetype
# Try to determine type based on extension
    _, ext = os.path.splitext(path)
ext = ext.lower()
if ext == '.roa':
ext_class = ContentInfo
elif ext == '.mft':
ext_class = ContentInfo
elif ext == '.crl':
ext_class = CertificateList
elif ext == '.cer':
ext_class = RPKICertificate
else:
sys.exit("Unknown filetype: " + ext)
# Read file
try:
        # Use a context manager so the file handle is closed even if reading fails.
        with open(path, "rb") as f:
            der_byte_string = f.read()
except Exception as e:
sys.exit("Could not read file.\n" + str(e))
# Parse ASN.1 data using previously picked type
try:
parsed = ext_class.load(der_byte_string)
except Exception as e:
sys.exit("Could not parse file.\n" + str(e))
# TODO Sanity check of resulting data
try:
# Convert to readable JSON output
data = parsed.native
if type(parsed) is ContentInfo:
for cert in data['content']['certificates']:
process_certificate(cert)
if data['content']['encap_content_info']['content_type'] == 'routeOriginAuthz':
process_roa(data['content']['encap_content_info']['content'])
elif data['content']['encap_content_info']['content_type'] == 'rpkiManifest':
process_manifest(data['content']['encap_content_info']['content'])
elif type(parsed) is RPKICertificate:
process_certificate(data)
elif type(parsed) is CertificateList:
pass
else:
sys.exit("Unkown content type")
print(json.dumps(jsonize_object(data), indent=2))
except Exception as e:
sys.exit("Something went wrong:\n" + str(e))
if __name__ == "__main__":
    main() | dump_json.py | 0.219087 | 0.218576 |
import os
import asyncio
import pandas as pd
import numpy as np
from time import sleep, time
from polofutures import RestClient, WsClient
_MAX_ROWS = 500
_LAST_TRADE = 0
# Account Keys
API_KEY = os.environ['PF_API_KEY']
SECRET = os.environ['PF_SECRET']
API_PASS = os.environ['PF_PASS']
# Trading parameters
SYMBOL = 'BTCUSDTPERP'
PREFIX = 'POLO_MM'
INTERVAL = "SET INTERVAL" # How often the MM loop runs in seconds, e.g. 15 seconds between loops
LEVERAGE = '25' # How much leverage you require, e.g. 25x leverage
ORDER_PAIRS = 5 # Number of order pairs to create, e.g. 5 pairs is 10 total orders
MIN_SPREAD = "SET MINIMUM SPREAD" # Minimum allowable spread to capture, in decimals, e.g. 0.001 is 0.1%
SPREAD_ADJUST = 0.002 # Sensitivity to spread change in decimals, e.g. 0.002 is 0.2% sensitivity
STEP_SIZE = 5 # Order step size in lots, from starting position. e.g. first order is 5, then 10,.. 15 and so on
RISK_LIMITS = {'short': -2000, 'long': 2000} # Maximum allowable position in lots, e.g. -2000 and 2000
rest_client = RestClient(API_KEY, SECRET, API_PASS)
# Set up REST market data and trade clients, and fetch an initial ticker snapshot
market = rest_client.market_api()
ticker = [market.get_ticker(SYMBOL)]
trade = rest_client.trade_api()
class MarketMaker:
    def __init__(self, latest_tick):
        self.latest_tick = latest_tick
def open_orders(self):
# Checks open orders on the book, and the spread from real price
self.orders = self.orders[
['symbol', 'leverage', 'price', 'value', 'size', 'side', 'id', 'clientOid', 'status']].copy()
self.orders.sort_values('price', ascending=False, inplace=True)
self.orders['spread'] = self.orders['price'].astype(float) / self.latest_tick - 1
self.orders.reset_index(drop=True, inplace=True)
async def mm_loop(self):
# This is the MM loop that runs at every set interval specified in the parameters
self.trade_status()
self.prepare_orders()
self.place_orders()
def trade_status(self):
# Trade status updates
self.position = trade.get_position_details(SYMBOL)
self.orders = pd.DataFrame(trade.get_order_list(status='active')['items'])
print(f'\n------\n'
f'Time - {int(time())}\n'
f'Index Price {self.latest_tick}\n'
f'Position - {self.position["currentQty"]}\n'
f'Current Open Orders - {self.orders.shape[0]}\n'
f'Entry Price - {self.position["avgEntryPrice"]}\n'
              f'Liquidation Price - {self.position["liquidationPrice"]}\n'
f'Unrealised Pnl - {self.position["unrealisedRoePcnt"] * 100}%\n')
def prepare_orders(self):
# Prepare orders as they should be
self.prep_orders = pd.DataFrame(
{'orderNum': range(ORDER_PAIRS * 2), 'side': ['sell'] * ORDER_PAIRS + ['buy'] * ORDER_PAIRS})
self.prep_orders['spread_target'] = np.where(self.prep_orders['side'] == 'sell',
MIN_SPREAD * (ORDER_PAIRS - self.prep_orders['orderNum']),
MIN_SPREAD * (ORDER_PAIRS - (1 + self.prep_orders['orderNum']))
)
self.prep_orders['price_target'] = ((1 + self.prep_orders['spread_target']) * self.latest_tick).astype(int)
self.prep_orders['size'] = (abs(self.prep_orders['spread_target']) * STEP_SIZE * 1000).astype(int)
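        # Assuming MIN_SPREAD=0.001 (the example from its comment), with ORDER_PAIRS=5 and
        # STEP_SIZE=5 this builds a quote ladder: sells at +0.5%..+0.1% and buys at
        # -0.1%..-0.5% around the index, sized 25 lots furthest out down to 5 lots nearest the mid.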
# Compare to orders that exist
# If no orders exist, place the starter orders
if self.orders.shape[0] == 0:
print('No Orders Found!\nPlacing Starting Orders...')
# print(self.prep_orders.to_string()) -- Use this for debugging
# Else place orders based on existing
else:
self.open_orders()
self.prep_orders = pd.merge(self.prep_orders, self.orders, on=['side', 'size'], how='left').fillna('No Order')
print(self.prep_orders.to_string())
def place_orders(self):
for index, row in self.prep_orders.iterrows():
if self.position["currentQty"] > RISK_LIMITS['long'] and row['side'] == 'buy':
print(f'Long risk limit Exceeded {RISK_LIMITS["long"]}')
            elif self.position["currentQty"] < RISK_LIMITS['short'] and row['side'] == 'sell':
print(f'Short risk limit Exceeded {RISK_LIMITS["short"]}')
else:
clientId = f'{PREFIX}-' \
f'{row["side"][0]}' \
f'{int(row["size"])}' \
f'at{row["price_target"]}' \
f'ts{int(time())}'
if self.orders.shape[0] < ORDER_PAIRS * 2:
if 'id' not in self.prep_orders or row['id'] == 'No Order':
orderid = trade.create_limit_order(symbol=SYMBOL,
side=row['side'],
leverage=LEVERAGE,
size=row['size'],
price=str(row['price_target']),
postOnly=True,
clientOid=clientId)
print(f'Order Placed! ClientID: {clientId}\tServer ID: {orderid["orderId"]}')
if self.orders.shape[0] > 0 and row['spread'] != 'No Order':
# Adjust the existing orders on the book
spread_move = abs(row['price_target']/int(row['price']) - 1)
if spread_move > MIN_SPREAD*(1+SPREAD_ADJUST):
trade.cancel_order(row['id'])
orderid = trade.create_limit_order(symbol=SYMBOL,
side=row['side'],
leverage=LEVERAGE,
size=row['size'],
price=str(row['price_target']),
postOnly=True,
clientOid=clientId)
print(f'Order Adjusted! ClientID: {row["clientOid"]}\tServer ID: {orderid["orderId"]}')
def get_index(msg):
if msg['topic'] == f'/contract/instrument:{SYMBOL}':
if 'indexPrice' in msg['data']:
global CURRENT_INDEX
CURRENT_INDEX = msg['data']['indexPrice']
else:
pass
async def ws_stream():
async def mm_async_loop():
try:
mm = MarketMaker(CURRENT_INDEX)
await mm.mm_loop()
await asyncio.sleep(INTERVAL)
except Exception as e:
print(f'Market Maker Error!\n'
f'Check Parameter Inputs\n {e}')
await ws_client.connect()
await ws_client.subscribe(f'/contract/instrument:{SYMBOL}')
while True:
await mm_async_loop()
await asyncio.sleep(0.1)
print('Starting Market Maker!')
CURRENT_INDEX = market.get_current_mark_price(SYMBOL)['indexPrice']
ws_client = WsClient(get_index, API_KEY, SECRET, API_PASS)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(ws_stream())
except (KeyboardInterrupt, Exception) as e:
print(f'Stream Error\n {e}')
finally:
print('Cancelling Orders and Shutting Down')
trade.cancel_all_limit_orders(SYMBOL)
print('Unsubscribing and disconnecting from websocket')
loop.run_until_complete(ws_client.disconnect())
    loop.close() | sample-MM.py | 0.421195 | 0.134378 |
import redis
import json
import os
import boto3
from botocore import config
from counters import *
from vwr.common.sanitize import deep_clean
DDB_TABLE_NAME = os.environ["TOKEN_TABLE"]
EVENT_ID = os.environ["EVENT_ID"]
REDIS_HOST = os.environ["REDIS_HOST"]
REDIS_PORT = os.environ["REDIS_PORT"]
SOLUTION_ID = os.environ['SOLUTION_ID']
SECRET_NAME_PREFIX = os.environ["STACK_NAME"]
user_agent_extra = {"user_agent_extra": SOLUTION_ID}
user_config = config.Config(**user_agent_extra)
boto_session = boto3.session.Session()
region = boto_session.region_name
ddb_client = boto3.client('dynamodb', endpoint_url="https://dynamodb."+region+".amazonaws.com", config=user_config)
secrets_client = boto3.client('secretsmanager', config=user_config, endpoint_url="https://secretsmanager."+region+".amazonaws.com")
response = secrets_client.get_secret_value(SecretId=f"{SECRET_NAME_PREFIX}/redis-auth")
redis_auth = response.get("SecretString")
rc = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, ssl=True, decode_responses=True, password=redis_auth)
def lambda_handler(event, context):
"""
This function is the entry handler for Lambda.
"""
print(event)
client_event_id = deep_clean(event['event_id'])
response = {}
headers = {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
}
if EVENT_ID == client_event_id:
# reset counters
rc.getset(SERVING_COUNTER, 0)
rc.getset(QUEUE_COUNTER, 0)
rc.getset(TOKEN_COUNTER, 0)
rc.getset(COMPLETED_SESSION_COUNTER, 0)
rc.getset(ABANDONED_SESSION_COUNTER, 0)
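        # GETSET atomically writes the new value (0) and returns the previous one,
        # which is discarded here; the net effect is a counter reset.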
try:
            response = ddb_client.delete_table(TableName=DDB_TABLE_NAME)
waiter = ddb_client.get_waiter('table_not_exists')
# wait for table to get deleted
waiter.wait(TableName=DDB_TABLE_NAME)
print("Token table deleted")
# recreate table
response = ddb_client.create_table(
                TableName=DDB_TABLE_NAME,
                BillingMode="PAY_PER_REQUEST",
                AttributeDefinitions=[
{
"AttributeName": "request_id",
"AttributeType": "S"
},
{
"AttributeName": "expires",
"AttributeType": "N"
},
{
"AttributeName": "event_id",
"AttributeType": "S"
}
],
                KeySchema=[
{
"AttributeName": "request_id",
"KeyType": "HASH"
}
],
                GlobalSecondaryIndexes=[
{
"IndexName": "EventExpiresIndex",
"KeySchema": [
{
"AttributeName": "event_id",
"KeyType": "HASH"
},
{
"AttributeName": "expires",
"KeyType": "RANGE"
}
],
"Projection": {
"ProjectionType": "ALL"
}
}
],
                SSESpecification={
"Enabled": True
}
)
waiter = ddb_client.get_waiter('table_exists')
# wait for table to get created
waiter.wait(TableName=DDB_TABLE_NAME)
print("Token table recreated")
# enable PITR
ddb_client.update_continuous_backups(
TableName=DDB_TABLE_NAME,
PointInTimeRecoverySpecification={
'PointInTimeRecoveryEnabled': True
}
)
response = {
"statusCode": 200,
"headers": headers,
"body": json.dumps({
"message": "Counters reset. DynamoDB table recreated."
})
}
except Exception as other_exception:
print(other_exception)
raise other_exception
else:
response = {
"statusCode": 400,
"headers": headers,
"body": json.dumps({"error": "Invalid event ID"})
}
print(response)
    return response | source/core-api/lambda_functions/reset_initial_state.py | 0.242026 | 0.109753 |
import re
from urllib.parse import urljoin
from ..base.request import check_network_state, NetworkState
from ..base.sign_in import check_final_state, SignState, Work
from ..utils.net_utils import get_module_name
from ..schema.discuz import Discuz
from ..utils import google_auth
class MainClass(Discuz):
URL = 'https://skyeysnow.com/'
USER_CLASSES = {
'points': [1000000]
}
@classmethod
def sign_in_build_schema(cls):
return {
get_module_name(cls): {
'type': 'object',
'properties': {
'login': {
'type': 'object',
'properties': {
'username': {'type': 'string'},
                            'password': {'type': 'string'},
                            'secret_key': {'type': 'string'},
},
'additionalProperties': False
}
},
'additionalProperties': False
}
}
def sign_in_build_login_workflow(self, entry, config):
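        # Two-step login: GET /login.php to obtain the form's action URL and formhash,
        # then POST the credentials via sign_in_by_login using the regexes below.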
return [
Work(
url='/login.php',
method=self.sign_in_by_get,
assert_state=(check_network_state, NetworkState.SUCCEED),
),
Work(
url='/login.php',
method=self.sign_in_by_login,
assert_state=(check_network_state, NetworkState.SUCCEED),
login_url_regex='(?<=action=").*?(?=")',
formhash_regex='(?<="formhash" value=").*(?=")'
)
]
def sign_in_build_workflow(self, entry, config):
return [
Work(
url='/',
method=self.sign_in_by_get,
succeed_regex=['<a.*?title="访问我的空间">.*?</a>'],
assert_state=(check_final_state, SignState.SUCCEED),
is_base_content=True
)
]
def sign_in_by_login(self, entry, config, work, last_content):
if not (login := entry['site_config'].get('login')):
entry.fail_with_prefix('Login data not found!')
return
secret_key = login.get('secret_key')
username, password = login['username'], login['password']
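        # Optional two-factor login: a freshly computed TOTP code is appended
        # to the username as "user@<code>" before submission.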
if secret_key:
totp_code = google_auth.calc(secret_key)
username += '@' + totp_code
login_url = urljoin(entry['url'], re.search(work.login_url_regex, last_content).group())
work.response_urls = [login_url]
formhash = re.search(work.formhash_regex, last_content).group()
data = {
'formhash': formhash,
'referer': '/',
'loginfield': 'username',
'username': username,
'password': password,
'loginsubmit': 'true'
}
        return self.request(entry, 'post', login_url, data=data, verify=False) | ptsites/sites/skyey2.py | 0.270288 | 0.186243 |
import gym
import numpy as np
from ctypes import *
from gym import spaces
from .helper import check_type_in_list
SL_TASK_PASS = 0
SL_TASK_RECEIVE = 1
SL_TASK_MOVE = 2
SL_TASK_INTERCEPT = 3
class ModelInput(Structure):
"""
Model input data structure.
"""
_fields_ = [('own_xyo_x', c_double), ('own_xyo_y', c_double),
('own_xyo_o', c_double), ('own_xyo_dot_x', c_double),
('own_xyo_dot_y', c_double), ('own_xyo_dot_o', c_double),
('peer_xyo_x', c_double), ('peer_xyo_y', c_double),
('peer_xyo_o', c_double), ('peer_xyo_dot_x', c_double),
('peer_xyo_dot_y', c_double), ('peer_xyo_dot_o', c_double),
('ball_xyz_x', c_double), ('ball_xyz_y', c_double),
('ball_xyz_z', c_double), ('ball_xyz_dot_x', c_double),
('ball_xyz_dot_y', c_double), ('ball_xyz_dot_z', c_double),
('own_ball_possession', c_double),
('peer_ball_possession', c_double)]
n_obs = len(_fields_)
class EnvironmentData(Structure):
"""
Environment data structure.
"""
_fields_ = [('reward', c_float), ('n_balls_passed', c_int)]
class ModelOutput(Structure):
"""
Model output data structure.
"""
_fields_ = [('role', c_int), ('action_space_size', c_int),
('vpg_value', c_float)]
n_prediction = len(_fields_)
class ModelTrainingData(Structure):
"""
Data to train the ML model.
"""
_pack_ = 1
_fields_ = [('time_stamp', c_int64), ('model_input', ModelInput),
('env_data', EnvironmentData), ('model_output', ModelOutput)]
class StrategyLearnerEnv(gym.Env):
"""Custom Environment that interfaces with the strategy learner in the
Tech United MSL software.
"""
metadata = {'render.modes': ['human']}
def __init__(self):
super(StrategyLearnerEnv, self).__init__()
# Define action and observation space
# They must be gym.spaces objects
# Example when using discrete actions:
        self.action_space = spaces.Discrete(ModelOutput.n_prediction)
# Example for using image as input:
self.observation_space = spaces.Box(low=0, high=1,
shape=[ModelInput.n_obs],
dtype=np.double)
def step(self, action):
# Execute one time step within the environment
return None
def reset(self):
# Reset the state of the environment to an initial state
return None
def render(self, mode='human', close=False):
# Render the environment to the screen
return None
class PreProcessor:
def __init__(self, structures: list):
"""
Pre-process model training data to make it suitable for plotting.
:param structures: Time ordered list of ModelTrainingData structures
for a single turtle.
"""
self._structures = structures
def process_new_data(self, structures: list):
"""
Process new turtle data using the pre processor.
:param structures: Time ordered lists of ModelTrainingData structures
for a single turtle.
"""
self.__init__(structures)
def get_time_synchronized_turtle_data(self):
"""
Return time synchronized turtle data.
:return: Time synchronized list of turtle data.
"""
return self._synchronise_time_data(self._structures)
def get_role_distributions(self):
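        """
        Return, per turtle, the normalised frequency of each role
        (pass, receive, move, intercept) across all recorded time steps.
        """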
distributions = []
for struct in self._structures:
dist = np.array([0, 0, 0, 0])
for t in struct:
if t.model_output.role == SL_TASK_PASS:
dist[0] += 1
elif t.model_output.role == SL_TASK_RECEIVE:
dist[1] += 1
elif t.model_output.role == SL_TASK_MOVE:
dist[2] += 1
elif t.model_output.role == SL_TASK_INTERCEPT:
dist[3] += 1
distributions.append(dist/dist.sum())
return distributions
def _synchronise_time_data(self, structures: list):
"""
Prune structures to cover the same time span.
:param structures: List of ModelTrainingData structures.
"""
[t_min, t_max] = self._get_time_bounds(structures)
for struct in structures:
check_type_in_list(struct, ModelTrainingData)
# Find indexes corresponding to bounds
min_index, max_index = self._get_time_bound_indexes(struct,
t_min, t_max)
# Delete data outside of bounds
del struct[0:min_index]
del struct[(max_index-min_index):]
return structures
@staticmethod
def _get_time_bound_indexes(struct, min_time, max_time):
"""
Return indexes for struct corresponding to time bounds and normalize
time.
:param struct: Time ordered list of ModelTrainingData structures
for a single turtle.
:param min_time: Lower time bound for cut-off.
:param max_time: Upper time bound for cut-off.
        :return: Tuple (min_index, max_index) of bound indexes into struct.
"""
min_found = False
max_found = False
for step in range(len(struct)):
if struct[step].time_stamp >= min_time and not min_found:
min_index = step
min_found = True
if struct[step].time_stamp >= max_time and not max_found:
max_index = step
max_found = True
struct[step].time_stamp = struct[step].time_stamp - min_time
if not max_found:
max_index = step
if min_found and max_found:
return min_index, max_index
else:
return 0, step
@staticmethod
def _get_time_bounds(structures: list):
"""
Return the time span shared between data structures.
:param structures: List of data structs for each turtle.
:return: [min_time, max_time]: Shared time bounds.
"""
min_time = structures[0][0].time_stamp
max_time = structures[0][-1].time_stamp
for struct in structures:
if min_time < struct[0].time_stamp:
min_time = struct[0].time_stamp
if max_time > struct[-1].time_stamp:
max_time = struct[-1].time_stamp
return [min_time, max_time]
if __name__ == "__main__":
    sl_env = StrategyLearnerEnv() | training/utils/structs.py | 0.830525 | 0.418816 |
from tweetbot.application import app
from service.esutil import es
from configure import es_mappings
from flask import Flask
from flask import request
from flask import abort
from datetime import date
from operator import itemgetter
import json, csv
from service.esutil.querybuilder.query_builder import QueryBuilder
from service.commons import json_response,json_to_csv
@app.route('/')
def index():
return '<h1>Welcome to TweetBot</h1>'
@app.route('/api1')
def stream():
res = dict()
try:
keywords = request.args.get('keywords')
runtime = request.args.get('runtime')
if keywords:
keywords = keywords.split(",")
else:
res = {
"status": "error",
"message": "Please provide a few keyword(s) (comma-separated)",
"example": "/api1?keywords=kw1,kw2,abc,xyz"
}
return json_response(res)
from twitter_api import Tweety
Tweety().filter(keywords=keywords,runtime = runtime)
res['status'] = "success"
res['message'] = "Started streaming tweets with keywords {}".format(keywords)
except Exception as exc:
res['status'] = "error"
res['message'] = exc.message
res['args'] = exc.args
return json_response(res)
@app.route('/api2', methods=["GET","POST"])
def search_handler():
es_size = int(request.args.get('size', 100))
es_from = int(request.args.get('from', 0))
data = json.loads(request.data)
criteria = data.get('criteria')
sort = data.get('sort')
s = QueryBuilder(criteria).search(index='tweets_index', doc_type='tweet')
if sort:
s = s.sort(*sort)
    # Slice as [offset : offset + page size] so the from/size parameters behave as expected.
    s = s[es_from:es_from + es_size]
print "[QUERY]", QueryBuilder.get_raw_query(s)
try:
es_res = QueryBuilder.execute(s)
except Exception as ex:
res = {
"status": "error",
"message": ex.message,
"args": ex.args
}
return json_response(res)
res = dict()
if es_res is not None:
hits = es_res.hits
res["count"] = {"total": hits.total, "fetched": len(hits.hits) }
res["results"] = hits.hits
return json_response(res)
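# Example /api2 request body (illustrative only; criteria/sort must follow QueryBuilder's
# format, and "-created_at" is a placeholder field name):
# {"criteria": {...}, "sort": ["-created_at"]}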
@app.route('/api3', methods=["GET","POST"])
def jsontocsv():
data = json.loads(request.data)
criteria = data.get('criteria')
sort = data.get('sort')
s = QueryBuilder(criteria).search(index='tweets_index', doc_type='tweet')
if sort:
s = s.sort(*sort)
print "[QUERY]", QueryBuilder.get_raw_query(s)
try:
es_res = QueryBuilder.execute(s)
except Exception as ex:
res = {
"status": "error",
"message": ex.message,
"args": ex.args
}
return json_response(res)
res = dict()
if es_res is not None:
hits = es_res.hits
res["count"] = {"total": hits.total, "fetched": len(hits.hits) }
res["results"] = hits.hits
    return json_to_csv(res) | tweetbot/views.py | 0.30715 | 0.092442 |
from __future__ import division, print_function
import numpy as np
from einops import rearrange
from math import ceil
class Coherence:
"""compute coherence magnitude and sine and cosine of coherence phase.
C: Number of sensors
T: Number of time frames
F: Number of frequency bins
>>> coherence_extractor = Coherence()
>>> coherence_extractor.transform(np.random.randn(4, 100, 257)).shape
(18, 100, 257)
"""
def __init__(self, smooth_len=21):
"""
Args:
            smooth_len: number of frames in the sliding window used to
                smooth the PSD estimate.
"""
self.smooth_len = smooth_len
def transform(self, x):
"""
Args:
x: STFT signal with shape (C, T, F)
Returns: Feature map of shape (3 * C * (C - 1) / 2, T, F)
"""
psds = np.einsum('...ctf,...dtf->...tfcd', x, x.conj())
if self.smooth_len is not None:
pad_width = psds.ndim * [(0, 0)]
pad_width[0] = (
(self.smooth_len - 1) // 2, ceil((self.smooth_len - 1) / 2)
)
psds = np.pad(psds, pad_width=pad_width, mode='reflect')
meanfilt = np.ones(self.smooth_len) / self.smooth_len
psds = np.apply_along_axis(
lambda m: np.correlate(m, meanfilt, mode='valid'),
axis=0, arr=psds
)
apsds = np.diagonal(psds, axis1=-2, axis2=-1)
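        # Normalise each cross-PSD by the geometric mean of the corresponding
        # auto-PSDs; eps guards against division by zero in silent bins.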
coherence = psds / (
np.sqrt(apsds[..., None] * apsds[..., None, :]) +
np.finfo(psds.dtype).eps
)
triu_idx = np.triu(np.ones_like(coherence), k=1) > 0.5
coherence = coherence[triu_idx].reshape(
(*coherence.shape[:-2], -1)
)
angle = np.angle(coherence)
coherence = np.concatenate(
[np.abs(coherence), np.sin(angle), np.cos(angle)], axis=-1
)
assert np.all(np.abs(coherence)**2 >= 0.), np.min(np.abs(coherence)**2)
assert (np.abs(coherence)**2 <= (1. + 1e-6)).all(), (np.abs(coherence)**2).max()
return rearrange(coherence, 't f c -> c t f')
def __call__(self, example):
example['coherence'] = self.transform(example['stft'])
return example
class IPDExtractor:
"""compute sine and cosine of inter-channel phase differences
C: Number of sensors
T: Number of time frames
F: Number of frequency bins
>>> ipd_extractor = IPDExtractor()
>>> ipd_extractor.transform(np.random.randn(4, 100, 257)).shape
(6, 100, 257)
"""
def __init__(self, reference_channel=0):
self.reference_channel = reference_channel
def transform(self, x):
"""
Args:
x: STFT signal with shape (C, T, F)
Returns: Feature map of shape (2 * (C - 1), T, F)
"""
num_channels = x.shape[0]
all_other_channels = [
d for d in range(num_channels)
if not d == self.reference_channel
]
ipd = np.angle(
x[all_other_channels] * x[[self.reference_channel]].conj()
)
sin_ipd = np.sin(ipd)
cos_ipd = np.cos(ipd)
return np.concatenate([sin_ipd, cos_ipd], axis=0)
def __call__(self, example):
example['ipd'] = self.transform(example['stft'])
return example | sins/features/spatial.py | 0.930023 | 0.508483 |
import os
import qtpy.QtCore
from qtpy.QtWidgets import (QWidget, QVBoxLayout, QSplitter)
from bioimageit_gui.core.framework import BiAction, BiComponent
from bioimageit_gui.runner import (BiRunnerStates, BiRunnerContainer,
BiRunnerModel, BiRunnerComponent,
BiGuiProgressObserver)
class BiRunnerViewApp(BiComponent):
def __init__(self, xml_file: str, viewer):
super().__init__()
self.viewer = viewer
# components
self.runnerContainer = BiRunnerContainer()
self.runnerModel = BiRunnerModel(self.runnerContainer)
self.runnerComponent = BiRunnerComponent(self.runnerContainer)
self.runnerContainer.register(self)
# connect observer
progressObserver = BiGuiProgressObserver()
self.runnerModel.observer = progressObserver
progressObserver.progressSignal.connect(
self.runnerComponent.progressValue)
progressObserver.messageSignal.connect(
self.runnerComponent.progressMessage)
# initialization
self.runnerContainer.process_uri = xml_file
self.runnerContainer.emit(BiRunnerStates.ProcessUriChanged)
# Widget
self.widget = QWidget()
self.widget.setObjectName('BiWidget')
self.widget.setAttribute(qtpy.QtCore.Qt.WA_StyledBackground, True)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self.widget.setLayout(layout)
layout.addWidget(self.runnerComponent.get_widget())
def update(self, action: BiAction):
if action.state == BiRunnerStates.RunFinished:
for out in self.runnerContainer.genarated_outputs:
self.viewer.setVisible(True)
for fileinfo in out:
print('open output', fileinfo)
name = os.path.basename(fileinfo['uri'])
self.viewer.add_data(fileinfo['uri'], name, fileinfo['format'])
if action.state == BiRunnerStates.ClickedView:
self.viewer.setVisible(True)
name = os.path.basename(self.runnerContainer.clicked_view_uri)
print("view data with info:")
print("name:", name)
print("uri:", self.runnerContainer.clicked_view_uri)
print("format:", self.runnerContainer.clicked_view_format)
self.viewer.add_data(self.runnerContainer.clicked_view_uri,
name,
self.runnerContainer.clicked_view_format)
def get_widget(self):
return self.widget
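# Minimal usage sketch (assumptions: a Qt event loop is running, and `viewer`
# is an object exposing the setVisible()/add_data() methods used above):
#
#   component = BiRunnerViewApp('some_tool.xml', viewer)
#   component.get_widget().show()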
class BiRunnerApp(BiComponent):
def __init__(self, xml_file: str):
super().__init__()
self.show_viewer = True
# components
self.runnerContainer = BiRunnerContainer()
self.runnerModel = BiRunnerModel(self.runnerContainer)
self.runnerComponent = BiRunnerComponent(self.runnerContainer)
# connect observer
progressObserver = BiGuiProgressObserver()
self.runnerModel.observer = progressObserver
progressObserver.progressSignal.connect(
self.runnerComponent.progressValue)
progressObserver.messageSignal.connect(
self.runnerComponent.progressMessage)
# initialization
self.runnerContainer.process_uri = xml_file
self.runnerContainer.emit(BiRunnerStates.ProcessUriChanged)
# Widget
self.widget = QWidget()
self.widget.setObjectName('BiWidget')
self.widget.setAttribute(qtpy.QtCore.Qt.WA_StyledBackground, True)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self.widget.setLayout(layout)
layout.addWidget(self.runnerComponent.get_widget())
def update(self, action: BiAction):
pass
def get_widget(self):
return self.widget | bioimageit_gui/apps/runnerapp.py | 0.431584 | 0.068382 |
from typing import Any, Dict, Optional
import logging
from subprocess import call
import yaml
import speech_recognition as sr
import spacy
logging.getLogger().setLevel(logging.INFO)
def _parse_configs() -> Dict[str, str]:
with open("config.yaml") as f:
config = yaml.full_load(f)
assert config, "Failed loading config YAML"
return config
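# Example config.yaml consumed here (keys are the ones read below; the values
# are illustrative):
#
#   input_option: voice          # or "text"
#   spacy_model: en_core_web_sm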
CONFIG = _parse_configs()
def _speech_to_text() -> str:
text = ""
while not text:
try:
logging.info("Listening...")
r = sr.Recognizer()
with sr.Microphone() as source:
audio = r.listen(
source,
timeout=5, # wait at most 5 seconds before a phrase starts
phrase_time_limit=5, # wait at most 5 seconds between phrases, otherwise stop and return
)
logging.info("Recognizing your voice...")
text = r.recognize_google(audio)
logging.info("You said: {}".format(text))
except Exception:
logging.info("Voice not recognized")
output("Sorry I didn't hear you, could you please say it again?")
return text
def get_user_input(type_: str = CONFIG["input_option"]) -> str:
if type_ == "voice":
return _speech_to_text()
else:
return input()
def _say(text: str, rate: int = 220):
logging.info("Bot said: {}".format(text))
call('say -r {} "{}"'.format(rate, text), shell=True)
def output(text: str, type_: str = CONFIG["input_option"], rate: int = 220):
if type_ == "voice":
_say(text, rate)
else:
print(text)
def load_spacy_model(model_name: str = CONFIG["spacy_model"]) -> Any:
"""
Load a spaCy model for named entity recognition.
Pre-trained models should be downloaded in advance by `python -m spacy download <model_name>`
"""
logging.info("Loading pre-trained model {}".format(model_name))
nlp = spacy.load(model_name)
logging.info("Spacy model loaded")
return nlp
_NLP = load_spacy_model()
def extract_entity_from_text(text: str, type_: str = "GPE") -> Optional[str]:
"""Extract GPE (location) or ORG (company name) entity from given text."""
assert type_ in {"GPE", "ORG"}, "Entity type not supported."
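# e.g. extract_entity_from_text("I work at Google in London", "ORG") would
# typically return "Google" with a standard English model (model-dependent).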
doc = _NLP(text)
for ent in doc.ents:
if ent.label_ == type_:
return str(ent)
return None | chatbot/util.py | 0.587825 | 0.183466 |
#-----------imports------------------------
import os
import boto3, uuid
import psycopg2
import datetime
#-----------main---------------------------
def aws_image_handler(bucket, suffix=''):
# s3 variables used in all three functions
# enter your own image names in the old_names list
old_names = ['image1.jpg', 'image2.jpg', 'image3.jpg', 'image4.jpg']
client = boto3.client('s3')
kwargs = {'Bucket': bucket}
# iterate through, copy existing images and rename with uuid
def get_s3_images(bucket, suffix, client, kwargs, old_names):
count = 0
while True:
resp = client.list_objects_v2(**kwargs)
for obj in resp['Contents']:
if obj['Key'].endswith(suffix):
key_path = obj['Key']
image = key_path.rsplit('/')[-1].rstrip()
if image in old_names:
count += 1
new_name = key_path.replace(image, str(uuid.uuid4()) + suffix)
print('#{c} copying {k} to new file {n}...'.format(c=count, k=key_path, n=new_name))
client.copy_object(Bucket=bucket, CopySource=bucket + '/' + key_path, Key=new_name)
try:
kwargs['ContinuationToken'] = resp['NextContinuationToken']
except KeyError:
break
print('getting s3 images')
get_s3_images(bucket, suffix, client, kwargs, old_names)
# delete all images with old names
def delete_old(bucket, suffix, client, kwargs, old_names):
count = 0
while True:
resp = client.list_objects_v2(**kwargs)
for obj in resp['Contents']:
key_path = obj['Key']
image = key_path.rsplit('/')[-1].rstrip()
if key_path.endswith(suffix) and image in old_names:
count += 1
print('#{c} deleting {k}...'.format(c=count, k=key_path))
client.delete_object(Bucket=bucket, Key=key_path)
try:
kwargs['ContinuationToken'] = resp['NextContinuationToken']
except KeyError:
break
print('deleting images with old names')
delete_old(bucket, suffix, client, kwargs, old_names)
# update the aws postgres rds db with new image names/urls
def update_db(bucket, suffix, client, kwargs):
# Database Connection Info - set these locally using your own aws credentials, etc.
database = os.environ.get('DB_NAME')
username = os.environ.get('DB_USER')
password = os.environ.get('DB_PASSWORD')  # variable name assumed -- set this to match your environment
host = os.environ.get('DB_HOST')
port = os.environ.get('DB_PORT')
# connection string
conn_string = "dbname='%s' user='%s' host='%s' password='%s' port='%s'" % (database, username, host, password, port)
# db table names
collection_table = 'collection'
image_table = 'image'
# s3 stuff to be used later. enter your own base url here.
base_url = 'https://s3.amazonaws.com/bucket-name-here/'
# connect to the database
conn = psycopg2.connect(conn_string)
cur = conn.cursor()
image_errors = []
while True:
# s3 stuff
s3_resp = client.list_objects_v2(**kwargs)
count = 0
for obj in s3_resp['Contents']:
key = obj['Key']
if key.endswith(suffix) and key is not None:
print(key)
filename = key.split('/')[-1]
if suffix:
    filename = filename[:-len(suffix)]  # note: rstrip(suffix) would strip characters, not the suffix
img_id = uuid.UUID(filename.strip())
img_url = base_url + key.strip()
col_id = uuid.UUID(key.split('/')[0].strip())
timestamp = datetime.datetime.now()
# insert values into the rds/postgres image table
count += 1
print("inserting image {}".format(count))
cur.execute("INSERT INTO {table} ({id},{url},{create},{modify},{col}) VALUES ('{v1}','{v2}','{v3}','{v4}','{v5}');".format(
table=image_table,
id='image_id',
url='image_url',
create='created',
modify='last_modified',
col='collection_id',
v1=img_id,
v2=img_url,
v3=timestamp,
v4=timestamp,
v5=col_id)
)
try:
conn.commit()
except:
# add the column id of any records with errors to the image_errors list
image_errors.append(col_id)
# update the collection table with the new s3 image url and uuid
cur.execute("UPDATE {table} SET {field} = '{url}' WHERE collection_id = '{col_id}';".format(
table=collection_table,
field='thumbnail_image',
url=img_url,
col_id=col_id)
)
try:
conn.commit()
except:
# add the collection id of any records with errors to the image_errors list
image_errors.append(col_id)
try:
kwargs['ContinuationToken'] = s3_resp['NextContinuationToken']
except KeyError:
break
return image_errors
print('updating db...')
image_errors = update_db(bucket, suffix, client, kwargs)
print('Bad collections...')
print(image_errors)
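# Environment variables expected by update_db (names as read above):
# DB_NAME, DB_USER, DB_PASSWORD, DB_HOST, DB_PORT. AWS credentials are
# resolved by boto3's default chain (environment, ~/.aws/credentials, or
# an instance role).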
# run main function aws_image_handler with necessary arguments
if __name__ == '__main__':
aws_image_handler(bucket='bucket-name-here', suffix='.jpg') | script.py | 0.2763 | 0.0771 |
import cv2
import numpy as np
from PIL import Image
def RGB2BGR(image):
    # Reverse the channel axis (R<->B); equivalent to the original
    # per-pixel loop, but vectorized.
    return image[:, :, ::-1].copy()
def masking_image(image, lower, upper):
    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # keep HSV pixels whose channels all lie within [lower, upper]; zero the rest
    in_range = np.all((image >= lower) & (image <= upper), axis=-1)
    image = np.where(in_range[..., None], image, 0)
    image = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_HSV2RGB)
    return image
def convolve3d(image, kernel):
    # 2D correlation applied per channel, zero-padded so output matches input size
    output = np.zeros_like(image)
    pad_y, pad_x = kernel.shape[0] // 2, kernel.shape[1] // 2
    image_padded = np.zeros((image.shape[0] + 2 * pad_y,
                             image.shape[1] + 2 * pad_x,
                             image.shape[2]))
    image_padded[pad_y:pad_y + image.shape[0], pad_x:pad_x + image.shape[1], :] = image
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            for z in range(image.shape[2]):
                output[y, x, z] = (kernel * image_padded[y:y + kernel.shape[0], x:x + kernel.shape[1], z]).sum()
    return output
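# 5x5 binomial kernel: the outer product of [1, 4, 6, 4, 1] with itself,
# normalized by 256 -- a standard integer approximation of a Gaussian blur.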
gaussian_blurr=np.array([[1, 4, 6, 4, 1],
[4, 16, 24, 16, 4],
[6, 24, 36, 24, 6],
[4, 16, 24, 16, 4],
[1, 4, 6, 4, 1]])/256
file_name="mask.jpeg"
im = np.array(Image.open(file_name))
lower_blue = np.array([94,130,38])
upper_blue = np.array([179,255,255])
im=RGB2BGR(im)
im=masking_image(convolve3d(im,gaussian_blurr),lower_blue,upper_blue)
pil_img=Image.fromarray(im.astype(np.uint8))
pil_img.save('masked.jpeg') | 5.Masking/masking.py | 0.286868 | 0.437163 |
from pyvisdk.esxcli.executer import execute_soap
from pyvisdk.esxcli.base import Base
class IscsiAdapterTargetPortalAuthChap(Base):
'''
Operations that can be performed on iSCSI target portal CHAP authentications
'''
moid = 'ha-cli-handler-iscsi-adapter-target-portal-auth-chap'
def set(self, adapter, address, name, authname=None, default=None, direction=None, inherit=None, level=None, secret=None):
'''
Set the iSCSI CHAP authentication for the iSCSI Target.
:param adapter: string, The iSCSI adapter name.
:param address: string, The iSCSI target address: <ip/dns[:port]>
:param authname: string, The iSCSI authentication name
:param default: boolean, Resetting iSCSI authentication setting to default.
:param direction: string, The iSCSI authentication direction ( [uni, mutual])
:param inherit: boolean, Inheriting iSCSI authentication setting from parent.
:param level: string, The iSCSI authentication level ( [prohibited, discouraged, preferred, required])
:param name: string, The iSCSI target name: <iqn/eui>
:param secret: string, The iSCSI authentication password
:returns: void
'''
return execute_soap(self._client, self._host, self.moid, 'vim.EsxCLI.iscsi.adapter.target.portal.auth.chap.Set',
adapter=adapter,
address=address,
authname=authname,
default=default,
direction=direction,
inherit=inherit,
level=level,
name=name,
secret=secret,
)
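# Illustrative call (all values are placeholders; the handler instance comes
# from pyvisdk's esxcli wiring, which supplies self._client and self._host):
#
#   chap.set(adapter='vmhba33', address='192.168.1.20:3260',
#            name='iqn.1998-01.com.vmware:target1',
#            direction='uni', level='required',
#            authname='chapuser', secret='chapsecret')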
def get(self, adapter, address, name, direction=None, method=None):
'''
Get iSCSI CHAP authentication on a target
:param adapter: string, The iSCSI adapter name.
:param address: string, The iSCSI target address: <ip/dns[:port]>
:param direction: string, The iSCSI authentication direction ( [uni, mutual])
:param method: string, The iSCSI authentication method ( [chap])
:param name: string, The iSCSI target name: <iqn/eui>
:returns: vim.EsxCLI.iscsi.adapter.target.portal.auth.chap.get.TargetAuth[]
'''
return execute_soap(self._client, self._host, self.moid, 'vim.EsxCLI.iscsi.adapter.target.portal.auth.chap.Get',
adapter=adapter,
address=address,
direction=direction,
method=method,
name=name,
) | pyvisdk/esxcli/handlers/ha_cli_handler_iscsi_adapter_target_portal_auth_chap.py | 0.672009 | 0.137706 |
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import StratifiedShuffleSplit
import utils
import numpy as np
import matplotlib.pyplot as plt
(imageTrain, labelTrain), (imageTest, labelTest) = tf.keras.datasets.fashion_mnist.load_data()
plt.figure()
plt.imshow(imageTrain[0])
plt.grid(False)
labels = {0 : "T-shirt/top", 1: "Trouser", 2: "Pullover", 3: "Dress", 4: "Coat", 5: "Sandal", 6: "Shirt", 7: "Sneaker", 8: "Bag", 9: "Ankle Boot"}
imageTrain = np.expand_dims(imageTrain, -1)
imageTest = np.expand_dims(imageTest, -1)
sss = StratifiedShuffleSplit(n_splits=5, random_state=0, test_size=1/6)
trainIndex, validIndex = next(sss.split(imageTrain, labelTrain))
imageValid, labelValid = imageTrain[validIndex], labelTrain[validIndex]
imageTrain, labelTrain = imageTrain[trainIndex], labelTrain[trainIndex]
labelTrain = keras.utils.to_categorical(labelTrain)
labelValid = keras.utils.to_categorical(labelValid)
labelTest = keras.utils.to_categorical(labelTest)
print("Fashion MNIST train - rows: ", imageTrain.shape[0]," columns:", imageTrain.shape[1:4])
print("Fashion MNIST validation - rows: ", imageValid.shape[0]," columns:", imageValid.shape[1:4])
print("Fashion MNIST test - rows: ", imageTest.shape[0]," columns:", imageTest.shape[1:4])
"""
# Parameters
# Initializer parameter
* **glorot_normal** for Xavier initialization
* **he_normal** for He initialization
# Activation function:
* **relu** for ReLU
* **selu** for SELU
# Optimizer parameter:
* **keras.optimizers.Adam()** for ADAM optimizer
* **keras.optimizers.Adagrad()** for Adagrad optimizer
* **keras.optimizers.RMSprop()** for RMSProp optimizer
* **keras.optimizers.Adadelta()** for Adadelta optimizer
# Filter size parameter: integer
# Epochs: integer
"""
parameters = {
'initializer': "he_normal",
'activation': keras.activations.relu,
'optimizer': keras.optimizers.Adam(),
'filterSize': 32
}
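# An alternative configuration built from the other documented options
# (illustrative, not a tuned setting):
# parameters = {
#     'initializer': "glorot_normal",
#     'activation': keras.activations.selu,
#     'optimizer': keras.optimizers.RMSprop(),
#     'filterSize': 64
# }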
"""#Initiate Model"""
fashion = utils.training(parameters)
imageTrain = fashion.dataPreProcessing(imageTrain)
imageTest = fashion.dataPreProcessing(imageTest)
imageValid = fashion.dataPreProcessing(imageValid)
fashionModel = fashion.model(imageTrain, 3)
fashionModel.summary()
fashionTraining = fashionModel.fit(imageTrain, labelTrain, epochs=10, verbose=1, validation_data=(imageValid, labelValid))
test_loss, test_acc = fashionModel.evaluate(imageTest, labelTest)
print('Test accuracy:', test_acc)
print('Test loss:', test_loss)
validLoss, validAccuracy = fashionModel.evaluate(imageValid, labelValid)
print('Valid Accuracy:', validAccuracy)
print('Valid Loss:', validLoss)
print("Accuracy:", fashionTraining.history['acc'][9])
print("Loss:", fashionTraining.history['loss'][9])
print("Validation Accuracy:", fashionTraining.history['val_acc'][9])
print("Validation Loss:", fashionTraining.history['val_loss'][9])
print("Test Accuracy:", test_acc)
print("Test Loss:", test_loss)
fashionModel.save('drive/My Drive/Master Life/cnn_fmnist_L2.h5') | train.py | 0.836555 | 0.628208 |
import pytest
import sys
from mock import patch, call
from pathlib import Path
from textwrap import dedent
from phykit.phykit import Phykit
here = Path(__file__)
@pytest.mark.integration
class TestAlignmentLength(object):
@patch("builtins.print")
def test_alignment_length_incorrect_file_path(self, mocked_print):
expected_result = "Input file could not be read. Please check input file argument."
testargs = [
"phykit",
"alignment_length",
"whoa",
]
with patch.object(sys, "argv", testargs):
    with pytest.raises(SystemExit) as pytest_wrapped_e:
        Phykit()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 2
@patch("builtins.print")
def test_alignment_length_test0(self, mocked_print):
expected_result = 6
testargs = [
"phykit",
"alignment_length",
f"{here.parent.parent.parent}/sample_files/simple.fa",
]
with patch.object(sys, "argv", testargs):
Phykit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_alignment_length_test1(self, mocked_print):
expected_result = 9
testargs = [
"phykit",
"alignment_length",
f"{here.parent.parent.parent}/sample_files/test_alignment_0.fa",
]
with patch.object(sys, "argv", testargs):
Phykit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_alignment_length_test_alias0(self, mocked_print):
expected_result = 9
testargs = [
"phykit",
"aln_len",
f"{here.parent.parent.parent}/sample_files/test_alignment_0.fa",
]
with patch.object(sys, "argv", testargs):
Phykit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_alignment_length_test_alias1(self, mocked_print):
expected_result = 9
testargs = [
"phykit",
"al",
f"{here.parent.parent.parent}/sample_files/test_alignment_0.fa",
]
with patch.object(sys, "argv", testargs):
Phykit()
assert mocked_print.mock_calls == [call(expected_result)]
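# The three tests above exercise the command and its aliases, equivalent to:
#   phykit alignment_length sample_files/test_alignment_0.fa
#   phykit aln_len sample_files/test_alignment_0.fa
#   phykit al sample_files/test_alignment_0.fa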
@patch("builtins.print")
def test_alignment_length_incorrect_input_file(self, mocked_print):
expected_result = "Input file could not be read. Please check input file argument."
testargs = [
"phykit",
"alignment_length",
f"{here.parent.parent.parent}/sample_files/test_trees.txt",
]
with patch.object(sys, "argv", testargs):
    with pytest.raises(SystemExit) as pytest_wrapped_e:
        Phykit()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 2 | tests/integration/alignment/test_alignment_length_integration.py | 0.404978 | 0.435781 |