| id (string, 3-8 chars) | content (string, 100-981k chars) |
|---|---|
1626284
|
import discord
import asyncio
import random
import steam
from steam.steamid import SteamId
from steam.steamprofile import SteamProfile
from steam.steamaccountuniverse import SteamAccountUniverse
from steam.steamaccounttype import SteamAccountType
from discord.ext import commands
from utils import checks
from mods.cog import Cog
code = "```py\n{0}\n```"
class Verification(Cog):
def __init__(self, bot):
super().__init__(bot)
self.cursor = bot.mysql.cursor
self.escape = bot.escape
self.bot.loop.create_task(self.verification_task())
async def remove_verification(self, server, idk=None):
role = discord.utils.get(server.roles, name='Awaiting Approval')
if role:
try:
await self.bot.delete_role(server, role)
except:
pass
sql = 'DELETE FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
self.cursor.execute(sql)
self.cursor.commit()
sql = 'DELETE FROM `verification_queue` WHERE server={0}'
sql = sql.format(server.id)
self.cursor.execute(sql)
self.cursor.commit()
if idk is None:
try:
await self.bot.send_message(server.owner, ":warning: One of your server administrators (or you) has enabled approval/verification on user join.\n\nAdministrator permission was taken away from me, making the feature unusable; I need Administrator permission to make/add a role to mute on join.\n\n`The system has been automatically disabled, re-enable anytime if you please.`")
except:
pass
@commands.group(pass_context=True, aliases=['onjoinverify', 'approval'], invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def verification(self, ctx, channel:discord.Channel=None, *, mentions:str=None):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
explicit_channel = channel is not None
if channel is None:
channel = ctx.message.channel
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
if mentions is None:
sql = "INSERT INTO `verification` (`server`, `channel`) VALUES (%s, %s)"
self.cursor.execute(sql, (ctx.message.server.id, channel.id))
self.cursor.commit()
await self.bot.say(":white_check_mark: Enabled user approval/verification on join, all requests will go to {0} (`verification #<discord_channel>` to change)!".format(channel.mention))
else:
if len(ctx.message.mentions) == 0:
await self.bot.say("invalid mention")
return
sql = "INSERT INTO `verification` (`server`, `channel`, `mentions`) VALUES (%s, %s, %s)"
mention_ids = []
mention_names = []
for mention in ctx.message.mentions:
mention_ids.append(mention.id)
mention_names.append(mention.name)
self.cursor.execute(sql, (ctx.message.server.id, channel.id, ' '.join(mention_ids)))
self.cursor.commit()
await self.bot.say(":white_check_mark: Enabled user approval/verification on join, all requests will go to {0} (`verification <#discord_channel>` to change) and mention `{0}`!".format(channel.mention, ', '.join(mention_names)))
permissions = discord.Permissions()
permissions.read_messages = True
try:
await self.bot.create_role(ctx.message.server, name='Awaiting Approval', color=discord.Colour(int("FF0000", 16)), permissions=permissions)
except Exception as e:
print(e)
await self.bot.say(":warning: For some reason I couldn't create the \"Awaiting Approval\" role and users won't be muted, please create it (same name) and disable all the permissions you don't want unapproved-users to have.\nMake sure I have the administrator permission!")
elif explicit_channel:
sql = 'UPDATE `verification` SET channel={0} WHERE server={1}'
sql = sql.format(channel.id, ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":white_check_mark: Set approval/verification channel to {0}".format(channel.mention))
else:
await self.bot.say(':warning: You are about to disable member verification/approval on join, type `yes` to proceed.')
while True:
response = await self.bot.wait_for_message(timeout=15, author=ctx.message.author, channel=ctx.message.channel)
if response is None or response.content != 'yes':
await self.bot.say('**Aborting**')
return
else:
break
await self.remove_verification(ctx.message.server, True)
try:
role = discord.utils.get(ctx.message.server.roles, name='Awaiting Approval')
if role is not None:
await self.bot.delete_role(ctx.message.server, role)
except discord.errors.Forbidden:
await self.bot.say("could not remove role, you took my perms away :(")
role2 = discord.utils.get(ctx.message.server.roles, name='Approved')
if role2 is not None:
try:
await self.bot.delete_role(ctx.message.server, role2)
except:
pass
await self.bot.say(":negative_squared_cross_mark: **Disabled** user approval on join")
@verification.command(name='mention', aliases=['mentions'], pass_context=True, invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def verification_mention(self, ctx, *mentions:str):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
if len(ctx.message.mentions) == 0 and '@everyone' not in mentions and '@here' not in mentions:
await self.bot.say(':no_entry: `Invalid mention(s).`')
return
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: This server does not have approval/verification turned on (`verification <#discord_channel>` to do so)!!!")
return
if len(mentions) == 0:
sql = 'UPDATE `verification` SET mentions=NULL WHERE server={0}'
sql = sql.format(ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":negative_squared_cross_mark: Disabled/Removed mentions on user join for approval")
else:
mention_ids = []
mention_names = []
everyone = False
for mention in mentions:
if mention == '@everyone':
mention_ids.append('@everyone')
elif mention == '@here':
mention_ids.append('@here')
for mention in ctx.message.mentions:
mention_ids.append(mention.id)
mention_names.append(mention.name)
sql = 'SELECT mentions FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
mention_results = self.cursor.execute(sql).fetchall()
update = False
if mention_results[0]['mentions'] is not None:
update = True
things = mention_results[0]['mentions'].split()
for x in things:
mention_ids.append(x)
sql = "UPDATE `verification` SET mentions={0} WHERE server={1}"
sql = sql.format(self.escape(' '.join(mention_ids)), ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
if update:
await self.bot.say(":white_check_mark: Updated mentions to include `{0}` on user join for approval".format(', '.join(mention_names)))
else:
await self.bot.say(":white_check_mark: Set `{0}` to be mentioned on user join for approval".format(', '.join(mention_names)))
@commands.group(pass_context=True, invoke_without_command=True, no_pm=True)
@checks.mod_or_perm(manage_server=True)
async def verify(self, ctx, *users:str):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
if len(users) == 0:
await self.bot.say("pls input users to verify thx")
return
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: This server does not have approval/verification turned **on** (`verification <#discord_channel>` to do so)!!!")
return
role = discord.utils.get(ctx.message.server.roles, name="Awaiting Approval")
count = 0
count2 = 0
discord_user = None
for user in users:
if user.isdigit():
user = int(user)
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND id={1}'
sql = sql.format(ctx.message.server.id, user)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":warning: `{0}` is not in the verification queue.".format(user))
if len(users) > 1:
continue
else:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND id={1}'
sql = sql.format(ctx.message.server.id, user)
self.cursor.execute(sql)
self.cursor.commit()
discord_user = discord.Server.get_member(ctx.message.server, user_id=str(result[0]['user']))
count += 1
else:
if len(ctx.message.mentions) == 0:
await self.bot.say("If you're not gonna use approval id, atleast mention correctly!")
return
for x in ctx.message.mentions:
if count2 == len(ctx.message.mentions):
break
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(ctx.message.server.id, x.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":warning: `{0}` is not in the verification queue.".format(user))
if len(users) > 1:
continue
else:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(ctx.message.server.id, x.id)
self.cursor.execute(sql)
self.cursor.commit()
discord_user = discord.Server.get_member(ctx.message.server, user_id=str(result[0]['user']))
count2 += 1
if discord_user is None:
continue
try:
await self.bot.remove_roles(discord_user, role)
except Exception as e:
await self.bot.say(code.format(e))
await self.bot.say(":warning: {0} was removed from the queue however his role could not be removed because I do not have Administrator permissions.\nPlease remove the role manually and give me **Administrator**.".format(user))
return
role = discord.utils.get(ctx.message.server.roles, name='Approved')
if role is not None:
try:
await self.bot.add_roles(discord_user, role)
except:
pass
await self.bot.say(":white_check_mark: Removed `{0}` from queue!".format(user))
queue_removed_msg = 'You have been approved/verified for `{0}` and can now message!'.format(ctx.message.server.name)
await self.bot.send_message(discord_user, queue_removed_msg)
@verify.command(name='list', pass_context=True, invoke_without_command=True, no_pm=True)
async def verify_list(self, ctx):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: This server does not have approval/verification turned on (`verification <#discord_channel>` to do so)!!!")
return
sql = 'SELECT * FROM `verification_queue` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: `There are no users in the verification/approval queue`")
return
users = []
for s in result:
user = discord.Server.get_member(ctx.message.server, user_id=str(s['user']))
if user is None:
continue
users.append('{0}#{1} ({2})'.format(user.name, user.discriminator, str(s['id'])))
await self.bot.say("**{0} Users in Queue**\n`{1}`".format(len(users), ', '.join(users)))
# steam_regex = r"^(http|https|)(\:\/\/|)steamcommunity\.com\/id\/(.*)$"
@verify.command(name='check', pass_context=True, aliases=['steam', 'link'])
async def verify_check(self, ctx, stem:str):
try:
if ctx.message.channel.is_private is False:
await self.bot.say(':no_entry: `Private Message only.`')
return
sql = 'SELECT * FROM `verification_queue` WHERE user={0}'
sql = sql.format(ctx.message.author.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(':no_entry: You are not in the verification queue for any server.')
return
server_id = result[0]['server']
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server_id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: Server you are in queue for disabled verification.")
return
sql = 'SELECT * FROM `verification_steam` WHERE server={0} AND user={1}'
sql = sql.format(server_id, ctx.message.author.id)
result = self.cursor.execute(sql).fetchall()
if len(result) != 0:
await self.bot.say(":no_entry: You've already verified your steam account!")
return
sql = 'SELECT id,server FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server_id, ctx.message.author.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":warning: `{0}` is not in the verification queue.".format(ctx.message.author))
return
verification_id = str(result[0]['id'])
steamProfile = None
steamId = SteamId.fromSteamId(stem)
if steamId is None:
steamId = SteamId.fromSteamId3(stem)
if steamId is None:
steamId = SteamId.fromSteamId64(stem)
if steamId is None:
steamId = SteamId.fromProfileUrl(stem)
if steamId is None:
steamProfile = SteamProfile.fromCustomProfileUrl(stem)
if steamProfile is None:
await self.bot.say("`:no_entry: `Bad Steam ID/64/URL`")
return
steamId = steamProfile.steamId
else:
steamProfile = SteamProfile.fromSteamId(steamId)
if verification_id in steamProfile.displayName:
sql = 'INSERT INTO `verification_steam` (`user`, `server`, `steam`, `id`) VALUES (%s, %s, %s, %s)'
self.cursor.execute(sql, (ctx.message.author.id, server_id, steamId.profileUrl, verification_id))
self.cursor.commit()
await self.bot.say(':white_check_mark: `{0}` steam profile submitted and passed steam name check, awaiting moderator approval.'.format(ctx.message.author))
else:
await self.bot.say(":warning: **{0}** is not in the steam account's name.".format(verification_id))
except Exception as e:
await self.bot.say(code.format(e))
async def verification_task(self):
if self.bot.shard_id != 0:
return
while True:
sql = 'SELECT * FROM `verification_steam`'
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await asyncio.sleep(60)
continue
for s in result:
server = self.bot.manager.get_server(str(s['server']))
if server:
user = server.get_member(str(s['user']))
if user is None:
continue
sql = 'SELECT channel FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
channel = server.get_channel(str(self.cursor.execute(sql).fetchall()[0]['channel']))
msg = '**Steam Account Check**\n`{0} (Verification ID: {1})` has submitted their steam profile and passed the name check.\n`Steam Profile:` {2}'.format(user, s['id'], s['steam'])
await self.bot.send_message(channel, msg)
sql = 'DELETE FROM `verification_steam` WHERE server={0} AND user={1}'
sql = sql.format(server.id, user.id)
self.cursor.execute(sql)
self.cursor.commit()
await asyncio.sleep(60)
async def on_member_join(self, member):
try:
if member.bot:
return
server = member.server
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
return
channel = server.get_channel(str(result[0]['channel']))
if channel is None:
raise discord.errors.NotFound
perms = server.me.permissions_in(channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.remove_verification(server)
return
sql = "INSERT INTO `verification_queue` (`user`, `server`, `id`) VALUES (%s, %s, %s)"
rand = random.randint(0, 99999)
self.cursor.execute(sql, (member.id, server.id, rand))
self.cursor.commit()
role = discord.utils.get(server.roles, name='Awaiting Approval')
await self.bot.add_roles(member, role)
for s in server.channels:
perms = member.permissions_in(s)
if perms.read_messages is False:
continue
overwrite = discord.PermissionOverwrite()
overwrite.send_messages = False
overwrite.read_messages = False
await self.bot.edit_channel_permissions(s, role, overwrite)
msg = ''
if result[0]['mentions']:
for x in result[0]['mentions'].split(' '):
if 'everyone' in x or 'here' in x:
msg += '{0} '.format(x)
else:
msg += '<@{0}> '.format(x)
msg += '\n'
msg += ':warning: `{0}` has joined the server and is awaiting approval\n\nRun `verify {1}` (or mention the user) to approve; kick the user to remove them from the queue.'.format(member, rand)
await self.bot.send_message(channel, msg, replace_everyone=False, replace_mentions=False)
join_msg = "You've been placed in the approval queue for `{0}`, please be patient and wait until a staff member approves your join!\n\nIf you'd like to expedite approval (and have a steam account), place **{1}** in your steam name and then run `.verify check <stean_url/id/vanity>`.".format(server.name, rand)
await self.bot.send_message(member, join_msg)
except (discord.errors.Forbidden, discord.errors.InvalidArgument, discord.errors.NotFound):
await self.remove_verification(server)
async def on_member_remove(self, member):
try:
if member.bot:
return
server = member.server
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
return
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
result2 = self.cursor.execute(sql).fetchall()
if len(result2) == 0:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
self.cursor.execute(sql)
self.cursor.commit()
channel = self.bot.get_channel(id=str(result[0]['channel']))
await self.bot.send_message(channel, ':exclamation: `{0}` has been removed from the approval/verification queue for leaving the server or being kicked.'.format(member))
except (discord.errors.Forbidden, discord.errors.InvalidArgument, discord.errors.NotFound):
await self.remove_verification(server)
async def on_member_ban(self, member):
try:
if member.bot:
return
server = member.server
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
return
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
result2 = self.cursor.execute(sql).fetchall()
if len(result2) == 0:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
self.cursor.execute(sql)
self.cursor.commit()
channel = self.bot.get_channel(id=str(result[0]['channel']))
await self.bot.send_message(channel, ':exclamation: `{0}` has been removed from the approval/verification queue for being banned from the server.'.format(member))
except (discord.errors.Forbidden, discord.errors.InvalidArgument, discord.errors.NotFound):
await self.remove_verification(server)
def setup(bot):
bot.add_cog(Verification(bot))
|
1626316
|
import obspython as S
from itertools import cycle
class Example:
def __init__(self, source_name=None):
self.source_name = source_name
self.data = ""
def update_text(self):
source = S.obs_get_source_by_name(self.source_name)
if source is not None:
settings = S.obs_data_create()
S.obs_data_set_string(settings, "text", self.data)
S.obs_source_update(source, settings)
S.obs_data_release(settings)
S.obs_source_release(source)
eg = Example()  # instance created; OBS part starts
def callback(props, prop, settings):
_number = S.obs_data_get_int(settings, "_int")
_text_value = S.obs_data_get_string(settings, "_text")
text_property = S.obs_properties_get(props, "_text")
if _number > 50:
eg.data = _text_value + str(_number)
S.obs_property_set_visible(text_property, True)
return True
else:
eg.data = ""
S.obs_property_set_visible(text_property, False)
return True
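# Note (based on the documented OBS scripting behaviour, stated here as an
# assumption): a "modified" callback returns True so the properties view is
# re-queried and the visibility change from obs_property_set_visible takes effect.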
def script_description():
return "Modification property example"
def script_update(settings):
eg.source_name = S.obs_data_get_string(settings, "source")
S.timer_remove(eg.update_text)
if eg.source_name != "":
S.timer_add(eg.update_text, 1 * 1000)
def script_properties(): # ui
props = S.obs_properties_create()
p = S.obs_properties_add_list(
props,
"source",
"Text Source",
S.OBS_COMBO_TYPE_EDITABLE,
S.OBS_COMBO_FORMAT_STRING,
)
sources = S.obs_enum_sources()
if sources is not None:
for source in sources:
source_id = S.obs_source_get_unversioned_id(source)
if source_id == "text_gdiplus" or source_id == "text_ft2_source":
name = S.obs_source_get_name(source)
S.obs_property_list_add_string(p, name, name)
S.source_list_release(sources)
number = S.obs_properties_add_int(props, "_int", "Number", 1, 100, 1)
text_value = S.obs_properties_add_text(
props, "_text", "Additional input:", S.OBS_TEXT_DEFAULT
)
S.obs_property_set_visible(text_value, False)
S.obs_property_set_modified_callback(number, callback)
return props
|
1626363
|
import logging
import unittest
import random
import copy
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence
from type_system import Type, PolymorphicType, PrimitiveType, Arrow, List, UnknownType, INT, BOOL
from program import Program, Function, Variable, BasicPrimitive, New
from dsl import DSL
from pcfg_logprob import LogProbPCFG
from Predictions.IOencodings import FixedSizeEncoding, VariableSizeEncoding
from Predictions.embeddings import SimpleEmbedding#, RecurrentEmbedding
logging_levels = {0:logging.INFO, 1:logging.DEBUG}
verbosity = 0
logging.basicConfig(format='%(message)s', level=logging_levels[verbosity])
class TestSum(unittest.TestCase):
def test_encoding(self):
size_max = 2 # maximum number of elements in an input (= list)
nb_inputs_max = 5 # maximum number of inputs
lexicon = list(range(30))
IOEncoder = FixedSizeEncoding(
nb_inputs_max = nb_inputs_max,
lexicon = lexicon,
size_max = size_max,
)
encoding_output_dimension = 15 # fixing the dimension,
# only useful for VariableSizeEncoding
IOEncoder2 = VariableSizeEncoding(
nb_inputs_max = nb_inputs_max,
lexicon = lexicon,
output_dimension = encoding_output_dimension,
)
IO1 = [[[11,20], [3]], [12,2]]
IO2 = [[[12,23], [2,15], [4,2], [0]], [2]]
res = IOEncoder.encode_IO(IO1)
self.assertTrue(len(res) == IOEncoder.output_dimension)
res = IOEncoder2.encode_IO(IO1)
self.assertTrue(len(res) == IOEncoder2.output_dimension)
res = IOEncoder.encode_IO(IO2)
self.assertTrue(len(res) == IOEncoder.output_dimension)
res = IOEncoder2.encode_IO(IO2)
self.assertTrue(len(res) == IOEncoder2.output_dimension)
IOs = [IO1, IO2]
res = IOEncoder.encode_IOs(IOs)
self.assertTrue(res.size() == (len(IOs), IOEncoder.output_dimension))
res = IOEncoder2.encode_IOs(IOs)
self.assertTrue(res.size() == (len(IOs), IOEncoder2.output_dimension))
def test_embedding(self):
size_max = 2 # maximum number of elements in an input (= list)
nb_inputs_max = 5 # maximum number of inputs
lexicon = list(range(30))
IOEncoder = FixedSizeEncoding(
nb_inputs_max = nb_inputs_max,
lexicon = lexicon,
size_max = size_max,
)
encoding_output_dimension = 15 # fixing the dimension,
# only useful for VariableSizeEncoding
IOEncoder2 = VariableSizeEncoding(
nb_inputs_max = nb_inputs_max,
lexicon = lexicon,
output_dimension = encoding_output_dimension,
)
IO1 = [[[11,20], [3], [2], [23]], [12,2]]
IO2 = [[[12,23], [2,15], [4,2], [0]], [2]]
IOs = [IO1, IO2]
batch_IOs = [IOs, IOs, IOs]
embedding_output_dimension = 30
IOEmbedder = SimpleEmbedding(
IOEncoder = IOEncoder,
output_dimension = embedding_output_dimension,
)
print("output dimension of the encoder", IOEncoder.output_dimension)
res = IOEmbedder.forward_IOs(IOs)
self.assertTrue(res.size() == (len(IOs), IOEncoder.output_dimension, IOEmbedder.output_dimension))
# res = IOEmbedder.forward(batch_IOs)
# self.assertTrue(res.size() == (len(batch_IOs), IOEncoder.output_dimension, IOEmbedder.output_dimension))
# def test_predictions_noinputs(self):
# primitive_types = {
# "if": Arrow(BOOL, Arrow(INT, INT)),
# "+": Arrow(INT, Arrow(INT, INT)),
# "0": INT,
# "1": INT,
# "and": Arrow(BOOL, Arrow(BOOL, BOOL)),
# "lt": Arrow(INT, Arrow(INT, BOOL)),
# }
# semantics = {
# "if": lambda b: lambda x: lambda y: x if b else y,
# "+": lambda x: lambda y: x + y,
# "0": 0,
# "1": 1,
# "and": lambda b1: lambda b2: b1 and b2,
# "lt": lambda x: lambda y: x <= y,
# }
# template_dsl = DSL(semantics, primitive_types)
# type_request = INT
# template_cfg = template_dsl.DSL_to_CFG(type_request=type_request,
# upper_bound_type_size=4,
# max_program_depth=4,
# min_variable_depth=2,
# n_gram=2)
# list_variables = [
# Variable(i, type_, probability={})
# for i,type_ in enumerate(type_request.arguments())
# ]
# H = 128 # hidden size of neural network
# lexicon = list(range(10)) # all elements in range(10)
# fe = RecurrentFeatureExtractor(lexicon=lexicon,
# H=H,
# bidirectional=True)
# PCFG_predictor = PCFG_Predictor(
# fe,
# template_cfg=template_cfg
# )
# Q_predictor = Q_Predictor(
# fe,
# template_dsl=template_dsl,
# template_cfg=template_cfg,
# list_variables=list_variables,
# )
# programs = [
# Function(BasicPrimitive("+", Arrow(INT, Arrow(INT, INT))),[BasicPrimitive("0", INT),BasicPrimitive("1", INT)], INT),
# ]
# x = [] # input
# y = [] # output
# ex = (x,y) # a single input/output example
# tasks = [[ex]]
# PCFG_predictor.train(programs, tasks)
# PCFG_predictor.test(programs, tasks)
# Q_predictor.train(programs, tasks)
# Q_predictor.test(programs, tasks)
# def test_predictions_with_inputs(self):
# t0 = PolymorphicType('t0')
# t1 = PolymorphicType('t1')
# primitive_types = {
# "if": Arrow(BOOL, Arrow(INT, INT)),
# "+": Arrow(INT, Arrow(INT, INT)),
# "0": INT,
# "1": INT,
# "and": Arrow(BOOL, Arrow(BOOL, BOOL)),
# "lt": Arrow(INT, Arrow(INT, BOOL)),
# "map": Arrow(Arrow(t0, t1), Arrow(List(t0), List(t1))),
# }
# semantics = {
# "if": lambda b: lambda x: lambda y: x if b else y,
# "+": lambda x: lambda y: x + y,
# "0": 0,
# "1": 1,
# "and": lambda b1: lambda b2: b1 and b2,
# "lt": lambda x: lambda y: x <= y,
# "map": lambda l: list(map(f, l)),
# }
# template_dsl = DSL(semantics, primitive_types)
# type_request = Arrow(List(INT), List(INT))
# template_cfg = template_dsl.DSL_to_CFG(type_request=type_request,
# upper_bound_type_size=10,
# max_program_depth=4,
# min_variable_depth=1,
# n_gram = 1)
# H = 128 # hidden size of neural network
# lexicon = list(range(10))
# fe = RecurrentFeatureExtractor(lexicon=lexicon,
# H=H,
# bidirectional=True)
# list_variables = [
# Variable(i, type_, probability={})
# for i,type_ in enumerate(type_request.arguments())
# ]
# PCFG_predictor = PCFG_Predictor(
# fe,
# template_cfg=template_cfg
# )
# Q_predictor = Q_Predictor(
# fe,
# template_dsl=template_dsl,
# template_cfg=template_cfg,
# list_variables=list_variables,
# )
# programs = [
# Function(
# BasicPrimitive("map", Arrow(Arrow(INT, INT), Arrow(List(INT), List(INT)))),
# [
# Function(
# BasicPrimitive("+", Arrow(INT, Arrow(INT, INT))),
# [BasicPrimitive("1", INT)],
# Arrow(INT, INT)
# ),
# Variable(0, List(INT))
# ],
# List(INT)
# ),
# Function(
# BasicPrimitive("map", Arrow(Arrow(INT, INT), Arrow(List(INT), List(INT)))),
# [
# Function(
# BasicPrimitive("+", Arrow(INT, Arrow(INT, INT))),
# [Function(
# BasicPrimitive("+", Arrow(INT, Arrow(INT, INT))),
# [BasicPrimitive("1", INT), BasicPrimitive("1", INT)],
# INT),
# ],
# Arrow(INT, INT)
# ),
# Variable(0, List(INT))
# ],
# List(INT)
# )
# ]
# # each task is a list of I/O
# # each I/O is a tuple of input, output
# # each output is a list whose members are elements of self.lexicon
# # each input is a tuple of lists, and each member of each such list is an element of self.lexicon
# x = ([4,4,2],) # input
# y = [5,5,3] # output
# ex1 = (x,y) # a single input/output example
# x = ([7,1],) # input
# y = [8,2] # output
# ex2 = (x,y) # a single input/output example
# task1 = [ex1,ex2] # a task is a list of input/outputs
# x = ([4,4,2],) # input
# y = [6,6,4] # output
# ex1 = (x,y) # a single input/output example
# task2 = [ex1] # a task is a list of input/outputs
# self.assertTrue fe.forward_one_task(task1).shape == torch.Size([H])
# # batched forward pass - test cases
# self.assertTrue fe.forward([task1,task2]).shape == torch.Size([2,H])
# self.assertTrue torch.all( fe.forward([task1,task2])[0] == fe.forward_one_task(task1) )
# self.assertTrue torch.all( fe.forward([task1,task2])[1] == fe.forward_one_task(task2) )
# # pooling of examples happens through averages - check via this self.assertTrue
# self.assertTrue(torch.stack([fe.forward_one_task(task1),fe.forward_one_task(task1)],0).mean(0) - fe.forward_one_task(task1)).abs().max() < 1e-5
# self.assertTrue(torch.stack([fe.forward_one_task(task1),fe.forward_one_task(task2)],0).mean(0) - fe.forward_one_task(task1)).abs().max() > 1e-5
# tasks = [task1,task2]
# PCFG_predictor.train(programs, tasks)
# PCFG_predictor.test(programs, tasks)
# Q_predictor.train(programs, tasks)
# Q_predictor.test(programs, tasks)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
1626385
|
import importlib
from django.apps import AppConfig
class ApiConfig(AppConfig):
name = 'api'
verbose_name = 'Api'
def ready(self):
importlib.import_module('api.signals')
|
1626400
|
import requests, json
import pandas as pd
from dataiku.connector import Connector
import importio_utils
class ImportIOConnector(Connector):
def __init__(self, config):
"""Make the only API call, which downloads the data"""
Connector.__init__(self, config)
if self.config['api_url'].startswith('https://api.import.io/'):
self.api_version = 'api'
elif self.config['api_url'].startswith('https://extraction.import.io/'):
self.api_version = 'extraction'
else:
raise Exception(
'It looks like this URL is not an API URL. URLs to call the API (and get a json response) start with "https://api.import.io" .')
print '[import.io connector] calling API...'
response = requests.get(self.config['api_url'])
print '[import.io connector] got response'
try:
self.json = response.json()
except Exception as e:
print e
print 'response was:\n', response.text
raise
def get_read_schema(self):
if self.api_version == 'api':
columns = importio_utils.convert_schema(self.json['outputProperties'])
return {"columns":columns}
else:
return None
def generate_rows(self, dataset_schema=None, dataset_partitioning=None, partition_id=None, records_limit = -1):
if self.api_version == 'api':
for row in self.json['results']:
yield row
else:
df = pd.DataFrame(self.json['extractorData']['data'][0]['group'])
for col in df.columns:
lengths = df[col].apply(lambda x: len(x) if type(x) == list else 0)
if lengths.max() == 1:
df[col] = df[col].apply(lambda x: x[0] if type(x) == list else {})
keys = df[col].apply(lambda x: x.keys())
for key in set([key for line in keys for key in line]): # drop duplicates
df[col + '_' + key] = df[col].apply(lambda x: x.get(key,''))
del df[col]
else:
df[col] = df[col].apply(json.dumps)
for row in df.to_dict(orient='records'):
yield row
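# Illustrative note (made-up data, not part of the original connector): a column
# whose cells are single-element lists of dicts, e.g.
#   df['price'] = [[{'text': '9.99', 'currency': 'USD'}], [{'text': '4.50'}]]
# is flattened above into 'price_text' / 'price_currency' columns, while columns
# holding longer lists are serialized to JSON strings with json.dumps.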
|
1626407
|
from bisect import bisect
from biicode.common.utils.bii_logging import logger
import difflib
from biicode.common.exception import BiiException
def _lcs_unique(a, b):
# set index[line in a] = position of line in a unless
# a is a duplicate, in which case it's set to None
index = {}
for i in xrange(len(a)):
line = a[i]
if line in index:
index[line] = None
else:
index[line] = i
# make btoa[i] = position of line i in a, unless
# that line doesn't occur exactly once in both,
# in which case it's set to None
btoa = [None] * len(b)
index2 = {}
for pos, line in enumerate(b):
next_ = index.get(line)
if next_ is not None:
if line in index2:
# unset the previous mapping, which we now know to
# be invalid because the line isn't unique
btoa[index2[line]] = None
del index[line]
else:
index2[line] = pos
btoa[pos] = next_
# this is the Patience sorting algorithm
# see http://en.wikipedia.org/wiki/Patience_sorting
backpointers = [None] * len(b)
stacks = []
lasts = []
k = 0
for bpos, apos in enumerate(btoa):
if apos is None:
continue
# as an optimization, check if the next line comes at the end,
# because it usually does
if stacks and stacks[-1] < apos:
k = len(stacks)
# as an optimization, check if the next line comes right after
# the previous line, because usually it does
elif stacks and stacks[k] < apos and (k == len(stacks) - 1 or stacks[k + 1] > apos):
k += 1
else:
k = bisect(stacks, apos)
if k > 0:
backpointers[bpos] = lasts[k - 1]
if k < len(stacks):
stacks[k] = apos
lasts[k] = bpos
else:
stacks.append(apos)
lasts.append(bpos)
if len(lasts) == 0:
return []
result = []
k = lasts[-1]
while k is not None:
result.append((btoa[k], k))
k = backpointers[k]
result.reverse()
return result
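# Illustrative note (not part of the original module): for two line sequences,
# _lcs_unique returns (position_in_a, position_in_b) pairs for lines that occur
# exactly once in both inputs and form a longest common subsequence, e.g.
#   a = ['x', 'common', 'y', 'shared']
#   b = ['common', 'z', 'shared']
#   _lcs_unique(a, b)  ->  [(1, 0), (3, 2)]
# Those unique matching lines act as anchor points for the recursive matcher below.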
def _match_find(a, b, alo, blo, ahi, bhi, answer, maxrecursion):
if maxrecursion < 0:
logger.error('Internal merge error')
# this will never happen normally, this check is to prevent DOS attacks
return
oldlength = len(answer)
if alo == ahi or blo == bhi:
return
last_a_pos = alo - 1
last_b_pos = blo - 1
for apos, bpos in _lcs_unique(a[alo:ahi], b[blo:bhi]):
# recurse between lines which are unique in each file and match
apos += alo
bpos += blo
# Most of the time, you will have a sequence of similar entries
if last_a_pos + 1 != apos or last_b_pos + 1 != bpos:
_match_find(a, b, last_a_pos + 1, last_b_pos + 1, apos, bpos, answer, maxrecursion - 1)
last_a_pos = apos
last_b_pos = bpos
answer.append((apos, bpos))
if len(answer) > oldlength:
# find matches between the last match and the end
_match_find(a, b, last_a_pos + 1, last_b_pos + 1, ahi, bhi, answer, maxrecursion - 1)
elif a[alo] == b[blo]:
# find matching lines at the very beginning
while alo < ahi and blo < bhi and a[alo] == b[blo]:
answer.append((alo, blo))
alo += 1
blo += 1
_match_find(a, b, alo, blo, ahi, bhi, answer, maxrecursion - 1)
elif a[ahi - 1] == b[bhi - 1]:
# find matching lines at the very end
nahi = ahi - 1
nbhi = bhi - 1
while nahi > alo and nbhi > blo and a[nahi - 1] == b[nbhi - 1]:
nahi -= 1
nbhi -= 1
_match_find(a, b, last_a_pos + 1, last_b_pos + 1, nahi, nbhi, answer, maxrecursion - 1)
for i in xrange(ahi - nahi):
answer.append((nahi + i, nbhi + i))
def _collapse_sequences(matches):
answer = []
start_a = start_b = None
length = 0
for i_a, i_b in matches:
if start_a is not None and (i_a == start_a + length) and (i_b == start_b + length):
length += 1
else:
if start_a is not None:
answer.append((start_a, start_b, length))
start_a = i_a
start_b = i_b
length = 1
if length != 0:
answer.append((start_a, start_b, length))
return answer
def _compute_matchs(a, b):
matches = []
_match_find(a, b, 0, 0, len(a), len(b), matches, 10)
# Matches now has individual line pairs of
# line A matches line B, at the given offsets
matching_blocks = _collapse_sequences(matches)
matching_blocks.append((len(a), len(b), 0))
return matching_blocks
def _range_intersection(ra, rb):
sa = max(ra[0], rb[0])
sb = min(ra[1], rb[1])
if sa < sb:
return sa, sb
def _range_compare(a, astart, aend, b, bstart, bend):
if (aend - astart) != (bend - bstart):
return False
for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
if a[ia] != b[ib]:
return False
else:
return True
def remove_diff_lines(diff):
return [line for line in diff if not line.startswith('?')]
def build_common_ancestor(source_lines, target_lines):
'''source is the origin of data
target is the place where the merge is being done, thus it is the most
important version and should take precedence in case of doubt'''
diff = difflib.Differ()
ds = diff.compare(source_lines, target_lines)
c = remove_diff_lines(ds)
common = []
for d in c:
if d[0] not in '-+':
common.append(d[2:])
return common
def three_way_merge_text(common, base, other, basename='base', othername='other'):
base = base.splitlines(1)
other = other.splitlines(1)
if not common:
common = build_common_ancestor(base, other)
else:
common = common.splitlines(1)
merger = TextMerger(None, basename, othername)
result = merger.merge(common, base, other)
return "".join(result), merger.collision
class TextMerger(object):
START_MARKER = '<<<<<<<<<<<<<<<<<<<<<<<<<'
MID_MARKER = '\n========================='
END_MARKER = '\n>>>>>>>>>>>>>>>>>>>>>>>>>'
def __init__(self, name_common=None, name_base=None, name_other=None):
self.name_common = name_common
self.name_base = name_base
self.name_other = name_other
self.collision = False
self.start_marker = TextMerger.START_MARKER
self.end_marker = TextMerger.END_MARKER
self.mid_marker = TextMerger.MID_MARKER
self.common_marker = None
if self.name_base:
self.start_marker += ' ' + str(self.name_base)
if self.name_other:
self.end_marker += ' ' + str(self.name_other)
def merge(self, common, base, other):
'''common, base, other are iterables of lines'''
newline = '\n'
if len(common) > 0:
if common[0].endswith('\r\n'):
newline = '\r\n'
elif common[0].endswith('\r'):
newline = '\r'
#if base_marker and reprocess:
# raise BiiException('Merge error')
mergeregions = _merge_regions(common, base, other)
for t in mergeregions:
what = t[0]
if what == 'eq':
for i in range(t[1], t[2]):
yield common[i]
elif what == 'base' or what == 'same':
for i in range(t[1], t[2]):
yield base[i]
elif what == 'other':
for i in range(t[1], t[2]):
yield other[i]
elif what == 'collide':
self.collision = True
yield self.start_marker + newline
for i in range(t[3], t[4]):
yield base[i]
if self.common_marker is not None:
yield self.common_marker + newline
for i in range(t[1], t[2]):
yield common[i]
yield self.mid_marker + newline
for i in range(t[5], t[6]):
yield other[i]
yield self.end_marker + newline
else:
raise BiiException('Internal merge error')
def _merge_regions(base, a, b):
# section a[0:ia] has been disposed of, etc
iz = ia = ib = 0
for zmatch, zend, amatch, aend, bmatch, bend in _find_regions(base, a, b):
matchlen = zend - zmatch
# invariants:
# matchlen >= 0
# matchlen == (aend - amatch)
# matchlen == (bend - bmatch)
len_a = amatch - ia
len_b = bmatch - ib
len_base = zmatch - iz
# invariants:
# assert len_a >= 0
# assert len_b >= 0
# assert len_base >= 0
# print 'unmatched a=%d, b=%d' % (len_a, len_b)
if len_a or len_b:
# try to avoid actually slicing the lists
same = _range_compare(a, ia, amatch, b, ib, bmatch)
if same:
yield 'same', ia, amatch
else:
equal_a = _range_compare(a, ia, amatch, base, iz, zmatch)
equal_b = _range_compare(b, ib, bmatch, base, iz, zmatch)
if equal_a and not equal_b:
yield 'other', ib, bmatch
elif equal_b and not equal_a:
yield 'base', ia, amatch
elif not equal_a and not equal_b:
yield 'collide', iz, zmatch, ia, amatch, ib, bmatch
else:
raise BiiException('Internal merge error')
ia = amatch
ib = bmatch
iz = zmatch
# if the same part of the base was deleted on both sides
# that's OK, we can just skip it.
if matchlen > 0:
# invariants:
# assert ia == amatch
# assert ib == bmatch
# assert iz == zmatch
yield 'eq', zmatch, zend
iz = zend
ia = aend
ib = bend
def _find_regions(base, a, b):
ia = ib = 0
amatches = _compute_matchs(base, a)
bmatches = _compute_matchs(base, b)
len_a = len(amatches)
len_b = len(bmatches)
sl = []
while ia < len_a and ib < len_b:
abase, amatch, alen = amatches[ia]
bbase, bmatch, blen = bmatches[ib]
# there is an unconflicted block at i; how long does it
# extend? until whichever one ends earlier.
i = _range_intersection((abase, abase + alen), (bbase, bbase + blen))
if i:
intbase = i[0]
intend = i[1]
intlen = intend - intbase
# found a match of base[i[0], i[1]]; this may be less than
# the region that matches in either one
# assert intlen <= alen
# assert intlen <= blen
# assert abase <= intbase
# assert bbase <= intbase
asub = amatch + (intbase - abase)
bsub = bmatch + (intbase - bbase)
aend = asub + intlen
bend = bsub + intlen
# assert base[intbase:intend] == a[asub:aend], \
# (base[intbase:intend], a[asub:aend])
# assert base[intbase:intend] == b[bsub:bend]
sl.append((intbase, intend,
asub, aend,
bsub, bend))
# advance whichever one ends first in the base text
if (abase + alen) < (bbase + blen):
ia += 1
else:
ib += 1
intbase = len(base)
abase = len(a)
bbase = len(b)
sl.append((intbase, intbase, abase, abase, bbase, bbase))
return sl
|
1626426
|
from setuptools import setup
from setuptools import find_packages
setup(
name="emojipastabot",
description="Generate emojipasta from text.",
version="1.0.0",
url="https://github.com/Kevinpgalligan/EmojipastaBot",
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules"
],
packages=find_packages("src"),
package_dir={'': 'src'},
package_data={'': ["*.txt", "*.json"]},
include_package_data=True,
install_requires=[
"emoji",
"praw>=5.0.0,<6.0.0"
]
)
|
1626433
|
from aiochan import *
def cleanup():
print('Cleaned up')
async def boring(msg, quit):
c = Chan()
async def work():
i = 0
while True:
_, ch = await select((c, f'{msg} {i}'), quit)
if ch is quit:
cleanup()
await quit.put('See you!')
return
i += 1
go(work())
return c
async def main():
quit = Chan()
c = await boring('Joe', quit)
for _ in range(10):
print(await c.get())
await quit.put('Bye')
print('Joe says: ' + await quit.get())
if __name__ == '__main__':
run_in_thread(main())
|
1626434
|
from PyObjCTools import NibClassBuilder, AppHelper
NibClassBuilder.extractClasses("MainMenu")
import MyView
AppHelper.runEventLoop()
|
1626436
|
del_items(0x80132930)
SetType(0x80132930, "struct Creds CreditsTitle[6]")
del_items(0x80132AD8)
SetType(0x80132AD8, "struct Creds CreditsSubTitle[28]")
del_items(0x80132F74)
SetType(0x80132F74, "struct Creds CreditsText[35]")
del_items(0x8013308C)
SetType(0x8013308C, "int CreditsTable[224]")
del_items(0x801342BC)
SetType(0x801342BC, "struct DIRENTRY card_dir[16][2]")
del_items(0x801347BC)
SetType(0x801347BC, "struct file_header card_header[16][2]")
del_items(0x801341E0)
SetType(0x801341E0, "struct sjis sjis_table[37]")
del_items(0x801396BC)
SetType(0x801396BC, "unsigned char save_buffer[106496]")
del_items(0x80139624)
SetType(0x80139624, "struct FeTable McLoadGameMenu")
del_items(0x80139604)
SetType(0x80139604, "char *CharFileList[5]")
del_items(0x80139618)
SetType(0x80139618, "char *Classes[3]")
del_items(0x80139640)
SetType(0x80139640, "struct FeTable McLoadCharMenu")
del_items(0x8013965C)
SetType(0x8013965C, "struct FeTable McLoadCard1Menu")
del_items(0x80139678)
SetType(0x80139678, "struct FeTable McLoadCard2Menu")
|
1626462
|
import cartography.intel.aws.ec2.volumes
import tests.data.aws.ec2.volumes
TEST_ACCOUNT_ID = '000000000000'
TEST_REGION = 'eu-west-1'
TEST_UPDATE_TAG = 123456789
def test_load_volumes(neo4j_session):
data = tests.data.aws.ec2.volumes.DESCRIBE_VOLUMES
cartography.intel.aws.ec2.volumes.load_volumes(
neo4j_session,
data,
TEST_REGION,
TEST_ACCOUNT_ID,
TEST_UPDATE_TAG,
)
expected_nodes = {
"v-01", "v-02",
}
nodes = neo4j_session.run(
"""
MATCH (r:EBSVolume) RETURN r.id;
""",
)
actual_nodes = {n['r.id'] for n in nodes}
assert actual_nodes == expected_nodes
def test_load_volumes_relationships(neo4j_session):
# Create Test AWSAccount
neo4j_session.run(
"""
MERGE (aws:AWSAccount{id: {aws_account_id}})
ON CREATE SET aws.firstseen = timestamp()
SET aws.lastupdated = {aws_update_tag}
""",
aws_account_id=TEST_ACCOUNT_ID,
aws_update_tag=TEST_UPDATE_TAG,
)
# Load Test Volumes
data = tests.data.aws.ec2.volumes.DESCRIBE_VOLUMES
cartography.intel.aws.ec2.volumes.load_volumes(
neo4j_session,
data,
TEST_REGION,
TEST_ACCOUNT_ID,
TEST_UPDATE_TAG,
)
expected = {
(TEST_ACCOUNT_ID, 'v-01'),
(TEST_ACCOUNT_ID, 'v-02'),
}
# Fetch relationships
result = neo4j_session.run(
"""
MATCH (n1:AWSAccount)-[:RESOURCE]->(n2:EBSVolume) RETURN n1.id, n2.id;
""",
)
actual = {
(r['n1.id'], r['n2.id']) for r in result
}
assert actual == expected
|
1626468
|
import logging
from datetime import timedelta
from typing import Optional, Iterable, List
from homeassistant.components.climate import SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE, HVAC_MODE_OFF, \
HVAC_MODE_HEAT
from homeassistant.components.climate.const import HVAC_MODE_AUTO, HVAC_MODE_COOL, CURRENT_HVAC_IDLE, CURRENT_HVAC_HEAT, \
CURRENT_HVAC_OFF, CURRENT_HVAC_COOL
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from meross_iot.controller.device import BaseDevice
from meross_iot.controller.known.subdevice import Mts100v3Valve
from meross_iot.manager import MerossManager
from meross_iot.model.enums import OnlineStatus, Namespace, ThermostatV3Mode
from meross_iot.model.exception import CommandTimeoutError
from meross_iot.model.push.generic import GenericPushNotification
from .common import (PLATFORM, MANAGER, log_exception, RELAXED_SCAN_INTERVAL, calculate_valve_id,
extract_subdevice_notification_data)
# Conditional import for switch device
try:
from homeassistant.components.climate import ClimateEntity
except ImportError:
from homeassistant.components.climate import ClimateDevice as ClimateEntity
_LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 1
SCAN_INTERVAL = timedelta(seconds=RELAXED_SCAN_INTERVAL)
class ValveEntityWrapper(ClimateEntity):
"""Wrapper class to adapt the Meross switches into the Homeassistant platform"""
def __init__(self, device: Mts100v3Valve):
self._device = device
# If the current device has more than 1 channel, we need to setup the device name and id accordingly
self._id = calculate_valve_id(device.internal_id)
self._entity_name = "{} ({})".format(device.name, device.type)
# For now, we assume that every Meross Thermostat supports the following modes.
# This might be improved in the future by looking at the device abilities via get_abilities()
self._flags = 0
self._flags |= SUPPORT_TARGET_TEMPERATURE
self._flags |= SUPPORT_PRESET_MODE
# region Device wrapper common methods
async def async_update(self):
if self._device.online_status == OnlineStatus.ONLINE:
try:
await self._device.async_update()
except CommandTimeoutError as e:
log_exception(logger=_LOGGER, device=self._device)
pass
async def async_added_to_hass(self) -> None:
self._device.register_push_notification_handler_coroutine(self._async_push_notification_received)
self.hass.data[PLATFORM]["ADDED_ENTITIES_IDS"].add(self.unique_id)
async def _async_push_notification_received(self, namespace: Namespace, data: dict, device_internal_id: str):
update_state = False
full_update = False
if namespace == Namespace.CONTROL_UNBIND:
_LOGGER.warning(f"Received unbind event. Removing device {self.name} from HA")
await self.platform.async_remove_entity(self.entity_id)
elif namespace == Namespace.SYSTEM_ONLINE:
_LOGGER.warning(f"Device {self.name} reported online event.")
online = OnlineStatus(int(data.get('online').get('status')))
update_state = True
full_update = online == OnlineStatus.ONLINE
elif namespace == Namespace.HUB_ONLINE:
_LOGGER.warning(f"Device {self.name} reported (HUB) online event.")
online_event_data = extract_subdevice_notification_data(data=data,
filter_accessor='online',
subdevice_id=self._device.subdevice_id)
online = OnlineStatus(int(online_event_data.get('status')))
update_state = True
full_update = online == OnlineStatus.ONLINE
else:
update_state = True
full_update = False
# In all other cases, just tell HA to update the internal state representation
if update_state:
self.async_schedule_update_ha_state(force_refresh=full_update)
async def async_will_remove_from_hass(self) -> None:
self._device.unregister_push_notification_handler_coroutine(self._async_push_notification_received)
self.hass.data[PLATFORM]["ADDED_ENTITIES_IDS"].remove(self.unique_id)
# endregion
# region Device wrapper common properties
@property
def unique_id(self) -> str:
return self._id
@property
def name(self) -> str:
return self._entity_name
@property
def device_info(self):
return {
'identifiers': {(PLATFORM, self._device.internal_id)},
'name': self._device.name,
'manufacturer': 'Meross',
'model': self._device.type + " " + self._device.hardware_version,
'sw_version': self._device.firmware_version
}
@property
def available(self) -> bool:
# A device is available if the client library is connected to the MQTT broker and if the
# device we are contacting is online
return self._device.online_status == OnlineStatus.ONLINE
@property
def should_poll(self) -> bool:
return False
# endregion
# region Platform-specific command methods
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
# Turn on the device if not already on
if hvac_mode == HVAC_MODE_OFF:
await self._device.async_turn_off()
return
elif not self._device.is_on():
await self._device.async_turn_on()
if hvac_mode == HVAC_MODE_HEAT:
await self._device.async_set_mode(ThermostatV3Mode.HEAT)
elif hvac_mode == HVAC_MODE_AUTO:
await self._device.async_set_mode(ThermostatV3Mode.AUTO)
elif hvac_mode == HVAC_MODE_COOL:
await self._device.async_set_mode(ThermostatV3Mode.COOL)
else:
_LOGGER.warning(f"Unsupported mode for this device ({self.name}): {hvac_mode}")
async def async_set_preset_mode(self, preset_mode: str) -> None:
await self._device.async_set_mode(ThermostatV3Mode[preset_mode])
async def async_set_temperature(self, **kwargs):
target = kwargs.get('temperature')
await self._device.async_set_target_temperature(target)
# endregion
# region Platform specific properties
@property
def temperature_unit(self) -> str:
# TODO: Check if there is a way for retrieving the Measurement Unit from the library
return TEMP_CELSIUS
@property
def current_temperature(self) -> Optional[float]:
return self._device.last_sampled_temperature
@property
def target_temperature(self) -> Optional[float]:
return self._device.target_temperature
@property
def target_temperature_step(self) -> Optional[float]:
return 0.5
@property
def max_temp(self) -> Optional[float]:
return self._device.max_supported_temperature
@property
def min_temp(self) -> Optional[float]:
return self._device.min_supported_temperature
@property
def hvac_mode(self) -> str:
if not self._device.is_on():
return HVAC_MODE_OFF
elif self._device.mode == ThermostatV3Mode.AUTO:
return HVAC_MODE_AUTO
elif self._device.mode == ThermostatV3Mode.HEAT:
return HVAC_MODE_HEAT
elif self._device.mode == ThermostatV3Mode.COOL:
return HVAC_MODE_COOL
elif self._device.mode == ThermostatV3Mode.ECONOMY:
return HVAC_MODE_AUTO
elif self._device.mode == ThermostatV3Mode.CUSTOM:
if self._device.last_sampled_temperature < self._device.target_temperature:
return HVAC_MODE_HEAT
else:
return HVAC_MODE_COOL
else:
raise ValueError("Unsupported thermostat mode reported.")
@property
def hvac_action(self) -> Optional[str]:
if not self._device.is_on():
return CURRENT_HVAC_OFF
elif self._device.is_heating:
return CURRENT_HVAC_HEAT
elif self._device.mode == ThermostatV3Mode.COOL:
return CURRENT_HVAC_COOL
else:
return CURRENT_HVAC_IDLE
@property
def hvac_modes(self) -> List[str]:
return [HVAC_MODE_OFF, HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_COOL]
@property
def preset_mode(self) -> str:
return self._device.mode.name
@property
def preset_modes(self) -> List[str]:
return [e.name for e in ThermostatV3Mode]
@property
def supported_features(self):
return self._flags
# endregion
async def _add_entities(hass: HomeAssistant, devices: Iterable[BaseDevice], async_add_entities):
new_entities = []
# Identify all the Mts100V3Valves
devs = filter(lambda d: isinstance(d, Mts100v3Valve), devices)
for d in devs:
w = ValveEntityWrapper(device=d)
if w.unique_id not in hass.data[PLATFORM]["ADDED_ENTITIES_IDS"]:
new_entities.append(w)
else:
_LOGGER.info(f"Skipping device {w} as it was already added to registry once.")
async_add_entities(new_entities, True)
async def async_setup_entry(hass, config_entry, async_add_entities):
manager = hass.data[PLATFORM][MANAGER] # type:MerossManager
devices = manager.find_devices()
await _add_entities(hass=hass, devices=devices, async_add_entities=async_add_entities)
# Register a listener for the Bind push notification so that we can add new entities at runtime
async def platform_async_add_entities(push_notification: GenericPushNotification, target_devices: List[BaseDevice]):
if push_notification.namespace == Namespace.CONTROL_BIND \
or push_notification.namespace == Namespace.SYSTEM_ONLINE \
or push_notification.namespace == Namespace.HUB_ONLINE:
# TODO: Discovery needed only when device becomes online?
await manager.async_device_discovery(push_notification.namespace == Namespace.HUB_ONLINE,
meross_device_uuid=push_notification.originating_device_uuid)
devs = manager.find_devices(device_uuids=(push_notification.originating_device_uuid,))
await _add_entities(hass=hass, devices=devs, async_add_entities=async_add_entities)
# Register a listener for new bound devices
manager.register_push_notification_handler_coroutine(platform_async_add_entities)
# TODO: Unload entry
# TODO: Remove entry
def setup_platform(hass, config, async_add_entities, discovery_info=None):
pass
|
1626483
|
from unittest import TestCase
from simplejson.compat import StringIO, long_type, b, binary_type, PY3
import simplejson as json
def as_text_type(s):
if PY3 and isinstance(s, binary_type):
return s.decode('ascii')
return s
class TestDump(TestCase):
def test_dump(self):
sio = StringIO()
json.dump({}, sio)
self.assertEqual(sio.getvalue(), '{}')
def test_constants(self):
for c in [None, True, False]:
self.assertTrue(json.loads(json.dumps(c)) is c)
self.assertTrue(json.loads(json.dumps([c]))[0] is c)
self.assertTrue(json.loads(json.dumps({'a': c}))['a'] is c)
def test_stringify_key(self):
items = [(b('bytes'), 'bytes'),
(1.0, '1.0'),
(10, '10'),
(True, 'true'),
(False, 'false'),
(None, 'null'),
(long_type(100), '100')]
for k, expect in items:
self.assertEqual(
json.loads(json.dumps({k: expect})),
{expect: expect})
self.assertEqual(
json.loads(json.dumps({k: expect}, sort_keys=True)),
{expect: expect})
self.assertRaises(TypeError, json.dumps, {json: 1})
for v in [{}, {'other': 1}, {b('derp'): 1, 'herp': 2}]:
for sort_keys in [False, True]:
v0 = dict(v)
v0[json] = 1
v1 = dict((as_text_type(key), val) for (key, val) in v.items())
self.assertEqual(
json.loads(json.dumps(v0, skipkeys=True, sort_keys=sort_keys)),
v1)
self.assertEqual(
json.loads(json.dumps({'': v0}, skipkeys=True, sort_keys=sort_keys)),
{'': v1})
self.assertEqual(
json.loads(json.dumps([v0], skipkeys=True, sort_keys=sort_keys)),
[v1])
def test_dumps(self):
self.assertEqual(json.dumps({}), '{}')
def test_encode_truefalse(self):
self.assertEqual(json.dumps(
{True: False, False: True}, sort_keys=True),
'{"false": true, "true": false}')
self.assertEqual(
json.dumps(
{2: 3.0,
4.0: long_type(5),
False: 1,
long_type(6): True,
"7": 0},
sort_keys=True),
'{"2": 3.0, "4.0": 5, "6": true, "7": 0, "false": 1}')
def test_ordered_dict(self):
# http://bugs.python.org/issue6105
items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
s = json.dumps(json.OrderedDict(items))
self.assertEqual(
s,
'{"one": 1, "two": 2, "three": 3, "four": 4, "five": 5}')
def test_indent_unknown_type_acceptance(self):
"""
A test against the regression mentioned at `github issue 29`_.
The indent parameter should accept any type which pretends to be
an instance of int or long when it comes to being multiplied by
strings, even if it is not actually an int or long, for
backwards compatibility.
.. _github issue 29:
http://github.com/simplejson/simplejson/issue/29
"""
class AwesomeInt(object):
"""An awesome reimplementation of integers"""
def __init__(self, *args, **kwargs):
if len(args) > 0:
# [construct from literals, objects, etc.]
# ...
# Finally, if args[0] is an integer, store it
if isinstance(args[0], int):
self._int = args[0]
# [various methods]
def __mul__(self, other):
# [various ways to multiply AwesomeInt objects]
# ... finally, if the right-hand operand is not awesome enough,
# try to do a normal integer multiplication
if hasattr(self, '_int'):
return self._int * other
else:
raise NotImplementedError("To do non-awesome things with"
" this object, please construct it from an integer!")
s = json.dumps([0, 1, 2], indent=AwesomeInt(3))
self.assertEqual(s, '[\n 0,\n 1,\n 2\n]')
def test_accumulator(self):
# the C API uses an accumulator that collects after 100,000 appends
lst = [0] * 100000
self.assertEqual(json.loads(json.dumps(lst)), lst)
def test_sort_keys(self):
# https://github.com/simplejson/simplejson/issues/106
for num_keys in range(2, 32):
p = dict((str(x), x) for x in range(num_keys))
sio = StringIO()
json.dump(p, sio, sort_keys=True)
self.assertEqual(sio.getvalue(), json.dumps(p, sort_keys=True))
self.assertEqual(json.loads(sio.getvalue()), p)
|
1626497
|
import logging
import math
from copy import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import _calculate_fan_in_and_fan_out
def extract_top_level_dict(current_dict):
"""
Builds a nested dictionary from a flat dictionary whose keys are dotted parameter names
(e.g. "conv1.weight"). Useful for dynamically passing external params to the meta-layers.
:param current_dict: A flat dict mapping dotted parameter names to their values. The
"layer_dict."/"block_dict."/"module-" prefixes are stripped before grouping.
:return: A nested dictionary of the params, grouped by their top-level name.
"""
output_dict = dict()
for key in current_dict.keys():
        name = key.replace("layer_dict.", "")
name = name.replace("block_dict.", "")
name = name.replace("module-", "")
top_level = name.split(".")[0]
sub_level = ".".join(name.split(".")[1:])
if top_level not in output_dict:
if sub_level == "":
output_dict[top_level] = current_dict[key]
else:
output_dict[top_level] = {sub_level: current_dict[key]}
else:
new_item = {key: value for key, value in output_dict[top_level].items()}
new_item[sub_level] = current_dict[key]
output_dict[top_level] = new_item
return output_dict
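# A minimal sketch (illustrative only; the tensors below are placeholders) of what
# extract_top_level_dict produces: a flat dictionary keyed by dot-separated names such as
# "conv_0.weight" is regrouped under its top-level layer names, ready to be handed to a
# layer's forward(..., params=...) call.
def _demo_extract_top_level_dict():
    flat_params = {
        "conv_0.weight": torch.zeros(8, 3, 3, 3),
        "conv_0.bias": torch.zeros(8),
        "linear.weights": torch.zeros(5, 32),
    }
    nested = extract_top_level_dict(current_dict=flat_params)
    # nested == {"conv_0": {"weight": ..., "bias": ...}, "linear": {"weights": ...}}
    assert set(nested.keys()) == {"conv_0", "linear"}
    assert set(nested["conv_0"].keys()) == {"weight", "bias"}
    return nested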
def extract_params_and_check_for_missing_keys(current_dict, layer_dict):
params_dict = extract_top_level_dict(current_dict=current_dict)
for key in layer_dict.keys():
if key not in params_dict:
params_dict[key] = None
return params_dict
class MetaConv1dLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, use_bias, groups=1, dilation_rate=1):
"""
        A MetaConv1d layer. Applies the same functionality as a standard Conv1d layer, with the added ability
        to receive a parameter dictionary at the forward pass which allows the convolution to use external
        weights instead of the internal ones stored in the conv layer. Useful for inner loop optimization in the meta
        learning setting.
:param in_channels: Number of input channels
:param out_channels: Number of output channels
:param kernel_size: Convolutional kernel size
:param stride: Convolutional stride
:param padding: Convolution padding
:param use_bias: Boolean indicating whether to use a bias or not.
"""
super(MetaConv1dLayer, self).__init__()
num_filters = out_channels
self.stride = int(stride)
self.padding = int(padding)
self.dilation_rate = int(dilation_rate)
self.use_bias = use_bias
self.weight = nn.Parameter(torch.empty(num_filters, in_channels, kernel_size))
nn.init.xavier_uniform_(self.weight)
if self.use_bias:
self.bias = nn.Parameter(torch.zeros(num_filters))
self.groups = groups
def forward(self, x, params=None):
"""
        Applies a conv1d forward pass. If params is not None, the passed params will be used as the conv weights and biases.
        :param x: Input batch.
        :param params: If None, the conv layer will use the stored self.weight and self.bias; otherwise
        the conv layer will use the passed params as its parameters.
        :return: The output of the convolution.
"""
if params is not None:
params = extract_top_level_dict(current_dict=params)
if self.use_bias:
(weight, bias) = params["weight"], params["bias"]
else:
(weight) = params["weight"]
bias = None
else:
if self.use_bias:
weight, bias = self.weight, self.bias
else:
weight = self.weight
bias = None
out = F.conv1d(input=x, weight=weight, bias=bias, stride=self.stride,
padding=self.padding, dilation=self.dilation_rate, groups=self.groups)
return out
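# A small usage sketch (shapes chosen arbitrarily for illustration): the layer behaves like a
# regular Conv1d when params is None, and uses externally supplied weights when a params
# dictionary is passed, which is the hook the meta-learning inner loop relies on.
def _demo_meta_conv1d():
    layer = MetaConv1dLayer(in_channels=3, out_channels=8, kernel_size=3,
                            stride=1, padding=1, use_bias=True)
    x = torch.randn(2, 3, 16)
    out_internal = layer(x)  # uses layer.weight / layer.bias
    fast_weights = {"weight": torch.randn_like(layer.weight),
                    "bias": torch.zeros_like(layer.bias)}
    out_external = layer(x, params=fast_weights)  # uses the passed tensors instead
    return out_internal.shape, out_external.shape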
class MetaConv2dLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, use_bias, groups=1, dilation_rate=1):
"""
        A MetaConv2d layer. Applies the same functionality as a standard Conv2d layer, with the added ability
        to receive a parameter dictionary at the forward pass which allows the convolution to use external
        weights instead of the internal ones stored in the conv layer. Useful for inner loop optimization in the meta
        learning setting.
:param in_channels: Number of input channels
:param out_channels: Number of output channels
:param kernel_size: Convolutional kernel size
:param stride: Convolutional stride
:param padding: Convolution padding
:param use_bias: Boolean indicating whether to use a bias or not.
"""
super(MetaConv2dLayer, self).__init__()
num_filters = out_channels
self.stride = stride
self.padding = int(padding)
self.dilation_rate = int(dilation_rate)
self.use_bias = use_bias
self.weight = nn.Parameter(torch.empty(num_filters, in_channels, kernel_size, kernel_size), requires_grad=True)
nn.init.xavier_uniform_(self.weight)
if self.use_bias:
self.bias = nn.Parameter(torch.zeros(num_filters), requires_grad=True)
self.groups = groups
def forward(self, x, params=None):
"""
        Applies a conv2d forward pass. If params is not None, the passed params will be used as the conv weights and biases.
        :param x: Input image batch.
        :param params: If None, the conv layer will use the stored self.weight and self.bias; otherwise
        the conv layer will use the passed params as its parameters.
        :return: The output of the convolution.
"""
if params is not None:
# print([key for key in params.keys()])
params = extract_top_level_dict(current_dict=params)
if self.use_bias:
(weight, bias) = params["weight"], params["bias"]
else:
(weight) = params["weight"]
bias = None
else:
if self.use_bias:
weight, bias = self.weight, self.bias
else:
weight = self.weight
bias = None
out = F.conv2d(input=x, weight=weight, bias=bias, stride=self.stride,
padding=self.padding, dilation=self.dilation_rate, groups=self.groups)
return out
class MetaLinearLayer(nn.Module):
def __init__(self, input_shape, num_filters, use_bias):
"""
        A MetaLinear layer. Applies the same functionality as a standard linear layer, with the added ability
        to receive a parameter dictionary at the forward pass which allows the layer to use external
        weights instead of the internal ones stored in the linear layer. Useful for inner loop optimization in the meta
        learning setting.
:param input_shape: The shape of the input data, in the form (b, f)
:param num_filters: Number of output filters
:param use_bias: Whether to use biases or not.
"""
super(MetaLinearLayer, self).__init__()
self.input_shape = input_shape
b, c = input_shape[:2]
self.use_bias = use_bias
self.weights = nn.Parameter(torch.empty(num_filters, c))
nn.init.xavier_uniform_(self.weights)
        logging.debug("MetaLinearLayer weights shape: %s", self.weights.shape)
if self.use_bias:
self.bias = nn.Parameter(torch.zeros(num_filters))
def forward(self, x, params=None):
"""
Forward propagates by applying a linear function (Wx + b). If params are none then internal params are used.
Otherwise passed params will be used to execute the function.
:param x: Input data batch, in the form (b, f)
:param params: A dictionary containing 'weights' and 'bias'. If params are none then internal params are used.
Otherwise the external are used.
:return: The result of the linear function.
"""
# print(x.shape)
if params is not None:
params = extract_top_level_dict(current_dict=params)
if self.use_bias:
(weight, bias) = params["weights"], params["bias"]
else:
(weight) = params["weights"]
bias = None
# print(x.shape, params['weights'].shape)
else:
if self.use_bias:
weight, bias = self.weights, self.bias
else:
weight = self.weights
bias = None
# print(x.shape)
out = F.linear(input=x, weight=weight, bias=bias)
# print(out.shape, weight.shape, self.input_shape)
return out
def reset_parameters(self):
self.weights.data = self.weights.data * 0.
fan_in, fan_out = _calculate_fan_in_and_fan_out(self.weights)
std = 1. * math.sqrt(2.0 / (fan_in + fan_out))
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
a_array = torch.ones(self.weights.shape) * a
        a_array = a_array.to(self.weights.device)
self.weights.data = self.weights.data + torch.distributions.Uniform(low=-a_array, high=a_array).rsample().to(
self.weights.device)
if self.use_bias:
self.bias.data = self.bias.data * 0.
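# A hedged sketch of the inner-loop pattern these layers enable (the learning rate, data and
# loss are placeholders): gradients are taken w.r.t. the stored parameters and the resulting
# "fast weights" are fed back in through params, leaving the stored parameters untouched.
# Note that this layer's parameter key is "weights" (plural), unlike the conv layers.
def _demo_meta_linear_inner_step(lr=0.1):
    layer = MetaLinearLayer(input_shape=(4, 10), num_filters=3, use_bias=True)
    x, y = torch.randn(4, 10), torch.randn(4, 3)
    loss = F.mse_loss(layer(x), y)
    grads = torch.autograd.grad(loss, [layer.weights, layer.bias])
    fast_weights = {"weights": layer.weights - lr * grads[0],
                    "bias": layer.bias - lr * grads[1]}
    return layer(x, params=fast_weights)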
class MetaBatchNormLayer(nn.Module):
def __init__(self, num_features, num_support_set_steps, num_target_set_steps,
eps=1e-5, momentum=0.1, affine=True, track_running_stats=True,
use_per_step_bn_statistics=False, learnable_bn_gamma=True, learnable_bn_beta=True):
"""
A MetaBatchNorm layer. Applies the same functionality of a standard BatchNorm layer with the added functionality of
being able to receive a parameter dictionary at the forward pass which allows the convolution to use external
weights instead of the internal ones stored in the conv layer. Useful for inner loop optimization in the meta
learning setting. Also has the additional functionality of being able to store per step running stats and per step beta and gamma.
"""
super(MetaBatchNormLayer, self).__init__()
self.num_features = num_features
self.eps = eps
self.affine = affine
self.track_running_stats = track_running_stats
self.num_features = num_features
self.use_per_step_bn_statistics = use_per_step_bn_statistics
self.learnable_gamma = learnable_bn_gamma
self.learnable_beta = learnable_bn_beta
if use_per_step_bn_statistics:
self.running_mean = nn.Parameter(
torch.zeros(num_support_set_steps + num_target_set_steps + 1, num_features),
requires_grad=False)
self.running_var = nn.Parameter(
torch.ones(num_support_set_steps + num_target_set_steps + 1, num_features),
requires_grad=False)
self.bias = nn.Parameter(
torch.zeros(num_support_set_steps + num_target_set_steps + 1, num_features),
requires_grad=self.learnable_beta)
self.weight = nn.Parameter(
torch.ones(num_support_set_steps + num_target_set_steps + 1, num_features),
requires_grad=self.learnable_gamma)
else:
self.running_mean = nn.Parameter(torch.zeros(num_features), requires_grad=False)
            self.running_var = nn.Parameter(torch.ones(num_features), requires_grad=False)
self.bias = nn.Parameter(torch.zeros(num_features),
requires_grad=self.learnable_beta)
self.weight = nn.Parameter(torch.ones(num_features),
requires_grad=self.learnable_gamma)
self.backup_running_mean = torch.zeros(self.running_mean.shape)
self.backup_running_var = torch.ones(self.running_var.shape)
self.momentum = momentum
def forward(self, input, num_step, training=False, backup_running_statistics=False):
"""
        Forward propagates by applying a batch norm function, using either the stored parameters or the
        per-step ones selected by num_step.
        :param input: Input data batch; can be of any size.
        :param num_step: The current inner loop step being taken. This is used when we are learning per step params and
         collecting per step batch statistics. It indexes the correct object to use for the current time-step.
        :param training: Whether this is currently the training or evaluation phase.
:param backup_running_statistics: Whether to backup the running statistics. This is used
at evaluation time, when after the pass is complete we want to throw away the collected validation stats.
:return: The result of the batch norm operation.
"""
if self.use_per_step_bn_statistics:
running_mean = self.running_mean[num_step]
running_var = self.running_var[num_step]
weight, bias = self.weight[num_step], self.bias[num_step]
# print(num_step)
else:
running_mean = self.running_mean
running_var = self.running_var
weight, bias = self.weight, self.bias
if backup_running_statistics and self.use_per_step_bn_statistics:
self.backup_running_mean.data = copy(self.running_mean.data)
self.backup_running_var.data = copy(self.running_var.data)
momentum = self.momentum
# print(running_mean.shape, running_var.shape)
output = F.batch_norm(input, running_mean, running_var, weight, bias,
training=True, momentum=momentum, eps=self.eps)
return output
def restore_backup_stats(self):
"""
Resets batch statistics to their backup values which are collected after each forward pass.
"""
if self.use_per_step_bn_statistics:
self.running_mean = nn.Parameter(self.backup_running_mean, requires_grad=False)
self.running_var = nn.Parameter(self.backup_running_var, requires_grad=False)
self.to(self.weight.device)
def extra_repr(self):
return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \
'track_running_stats={track_running_stats}'.format(**self.__dict__)
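# A brief sketch (feature count and step counts are arbitrary) of the per-step batch norm
# statistics: with use_per_step_bn_statistics=True the layer keeps one running mean/var and one
# gamma/beta per inner-loop step, and num_step selects which set is used in the forward pass.
def _demo_per_step_batch_norm():
    bn = MetaBatchNormLayer(num_features=8, num_support_set_steps=5, num_target_set_steps=1,
                            use_per_step_bn_statistics=True)
    x = torch.randn(4, 8, 6, 6)
    out_step0 = bn(x, num_step=0, training=True)
    out_step3 = bn(x, num_step=3, training=True)  # different per-step statistics and affine params
    return out_step0.shape, out_step3.shape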
class MetaConvNormLayerLeakyReLU(nn.Module):
def __init__(self, input_shape, num_filters, kernel_size, stride, padding, use_bias, per_step_bn_statistics,
num_support_set_steps, num_target_set_steps,
use_normalization=True, groups=1):
"""
        Initializes a Conv->BatchNorm->LeakyReLU layer which applies those operations in that order.
        :param input_shape: The image input shape in the form (b, c, h, w)
        :param num_filters: number of filters for the convolutional layer
        :param kernel_size: the kernel size of the convolutional layer
        :param stride: the stride of the convolutional layer
        :param padding: the padding of the convolutional layer
        :param use_bias: whether the convolutional layer utilizes a bias
        :param per_step_bn_statistics: whether the batch norm layer keeps per-step running statistics
        :param use_normalization: whether to apply batch normalization after the convolution
"""
super(MetaConvNormLayerLeakyReLU, self).__init__()
self.input_shape = input_shape
self.use_normalization = use_normalization
self.use_per_step_bn_statistics = per_step_bn_statistics
self.num_filters = num_filters
self.kernel_size = kernel_size
self.num_support_set_steps = num_support_set_steps
self.num_target_set_steps = num_target_set_steps
self.stride = stride
self.groups = groups
self.padding = padding
self.use_bias = use_bias
self.layer_dict = nn.ModuleDict()
self.build_block()
def build_block(self):
x = torch.zeros(self.input_shape)
out = x
self.conv = MetaConv2dLayer(in_channels=out.shape[1], out_channels=self.num_filters,
kernel_size=self.kernel_size,
stride=self.stride, padding=self.padding, use_bias=self.use_bias,
groups=self.groups)
out = self.conv(out)
if type(out) == tuple:
out, _ = out
if self.use_normalization:
self.norm_layer = MetaBatchNormLayer(num_features=out.shape[1], track_running_stats=True,
use_per_step_bn_statistics=self.use_per_step_bn_statistics,
num_support_set_steps=self.num_support_set_steps,
num_target_set_steps=self.num_target_set_steps)
# print(out.shape)
out = self.norm_layer.forward(out, num_step=0)
out = F.leaky_relu(out)
print(out.shape)
def forward(self, x, num_step, params=None, training=False, backup_running_statistics=False):
"""
Forward propagates by applying the function. If params are none then internal params are used.
Otherwise passed params will be used to execute the function.
:param input: input data batch, size either can be any.
:param num_step: The current inner loop step being taken. This is used when we are learning per step params and
collecting per step batch statistics. It indexes the correct object to use for the current time-step
:param params: A dictionary containing 'weight' and 'bias'.
:param training: Whether this is currently the training or evaluation phase.
:param backup_running_statistics: Whether to backup the running statistics. This is used
at evaluation time, when after the pass is complete we want to throw away the collected validation stats.
:return: The result of the batch norm operation.
"""
conv_params = None
if params is not None:
params = {key: value for key, value in params.items()}
params = extract_top_level_dict(current_dict=params)
conv_params = params['conv']
# if params is not None:
# print([key for key in params.keys()])
# else:
# print(None)
out = x
out = self.conv(out, params=conv_params)
if type(out) == tuple:
out, _ = out
if self.use_normalization:
out = self.norm_layer.forward(out, num_step=num_step,
training=training,
backup_running_statistics=backup_running_statistics)
out = F.leaky_relu(out)
return out
def restore_backup_stats(self):
"""
Restore stored statistics from the backup, replacing the current ones.
"""
        if self.use_normalization:
self.norm_layer.restore_backup_stats()
class VGGActivationNormNetwork(nn.Module):
def __init__(self, input_shape, num_output_classes, use_channel_wise_attention,
num_stages, num_filters, num_support_set_steps, num_target_set_steps):
"""
Builds a multilayer convolutional network. It also provides functionality for passing external parameters to be
used at inference time. Enables inner loop optimization readily.
        :param input_shape: The input image batch shape.
        :param num_output_classes: The number of output classes of the network.
        :param use_channel_wise_attention: Whether to apply channel-wise (squeeze-excite) attention.
        :param num_stages: Number of convolutional stages.
        :param num_filters: Number of filters per convolutional layer.
        :param num_support_set_steps: Number of inner-loop support set steps (used for per-step batch norm statistics).
        :param num_target_set_steps: Number of inner-loop target set steps (used for per-step batch norm statistics).
"""
super(VGGActivationNormNetwork, self).__init__()
self.total_layers = 0
self.upscale_shapes = []
self.num_filters = num_filters
self.num_stages = num_stages
self.input_shape = input_shape
self.use_channel_wise_attention = use_channel_wise_attention
self.num_output_classes = num_output_classes
self.num_support_set_steps = num_support_set_steps
self.num_target_set_steps = num_target_set_steps
self.build_network()
def build_network(self):
"""
        Builds the network before inference is required by creating some dummy inputs with the same shape as
        self.input_shape. It then passes them through the network, dynamically computes input shapes and
        sets output shapes for each layer.
"""
x = torch.zeros(self.input_shape)
out = x
self.layer_dict = nn.ModuleDict()
for i in range(self.num_stages):
self.layer_dict['conv_{}'.format(i)] = MetaConvNormLayerLeakyReLU(input_shape=out.shape,
num_filters=self.num_filters,
kernel_size=3, stride=1,
padding=1,
use_bias=True,
groups=1, per_step_bn_statistics=True,
num_support_set_steps=self.num_support_set_steps,
num_target_set_steps=self.num_target_set_steps)
out = self.layer_dict['conv_{}'.format(i)](out, training=True, num_step=0)
out = F.max_pool2d(input=out, kernel_size=2, stride=2, padding=0)
out = out.view((out.shape[0], -1))
if type(self.num_output_classes) == list:
for idx, num_output_classes in enumerate(self.num_output_classes):
self.layer_dict['linear_{}'.format(idx)] = MetaLinearLayer(input_shape=out.shape,
num_filters=num_output_classes,
use_bias=True)
pred = self.layer_dict['linear_{}'.format(idx)](out)
else:
self.layer_dict['linear'] = MetaLinearLayer(input_shape=out.shape,
num_filters=self.num_output_classes, use_bias=True)
out = self.layer_dict['linear'](out)
print("VGGNetwork build", out.shape)
def forward(self, x, num_step, dropout_training=None, params=None, training=False,
backup_running_statistics=False, return_features=False):
"""
        Forward propagates through the network. If any params are passed then they are used instead of stored params.
:param x: Input image batch.
:param num_step: The current inner loop step number
:param params: If params are None then internal parameters are used. If params are a dictionary with keys the
same as the layer names then they will be used instead.
:param training: Whether this is training (True) or eval time.
        :param backup_running_statistics: Whether to backup the running statistics in their backup store, which is
        then used to reset the stats back to a previous state (usually after an eval loop, when we want to throw away stored statistics).
:return: Logits of shape b, num_output_classes.
"""
param_dict = dict()
if params is not None:
params = {key: value[0] for key, value in params.items()}
# print([key for key, value in param_dict.items()])
param_dict = extract_top_level_dict(current_dict=params)
for name, param in list(self.layer_dict.named_parameters()) + list(self.layer_dict.items()):
path_bits = name.split(".")
layer_name = path_bits[0]
if layer_name not in param_dict:
param_dict[layer_name] = None
out = x
# print([key for key, value in param_dict.items() if value is not None])
for i in range(self.num_stages):
out = self.layer_dict['conv_{}'.format(i)](out, params=param_dict['conv_{}'.format(i)], training=training,
backup_running_statistics=backup_running_statistics,
num_step=num_step)
out = F.max_pool2d(input=out, kernel_size=(2, 2), stride=2, padding=0)
features = out
out = out.view(out.size(0), -1)
if type(self.num_output_classes) == list:
pred_list = []
for idx, num_output_classes in enumerate(self.num_output_classes):
cur_pred = self.layer_dict['linear_{}'.format(idx)](out, params=param_dict['linear_{}'.format(idx)])
pred_list.append(cur_pred)
out = pred_list
else:
out = self.layer_dict['linear'](out, params=param_dict['linear'])
if return_features:
return out, features
else:
return out
def restore_backup_stats(self):
"""
Reset stored batch statistics from the stored backup.
"""
for name, module in self.named_modules():
if type(module) == MetaBatchNormLayer:
module.restore_backup_stats()
def zero_grad(self, params=None):
if params is None:
for param in self.parameters():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
print(param.grad)
param.grad.zero_()
else:
for name, param in params.items():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
print(param.grad)
param.grad.zero_()
params[name].grad = None
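# A compact sketch (hyperparameters picked only for illustration) of building the network on
# dummy data and running a forward pass with and without an external parameter dictionary,
# mirroring how an outer/inner meta-learning loop would call it. The forward pass expects each
# value in params to be indexable (it takes value[0]), hence the one-element tuples below.
def _demo_vgg_activation_norm_network():
    net = VGGActivationNormNetwork(input_shape=(2, 3, 32, 32), num_output_classes=5,
                                   use_channel_wise_attention=False,
                                   num_stages=4, num_filters=16,
                                   num_support_set_steps=5, num_target_set_steps=1)
    x = torch.randn(2, 3, 32, 32)
    logits = net(x, num_step=0, training=True)
    # named "fast weights"; here they are just the current parameters wrapped in tuples
    fast_weights = {name: (param,) for name, param in net.named_parameters()}
    logits_fast = net(x, num_step=0, params=fast_weights, training=True)
    return logits.shape, logits_fast.shape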
class FCCActivationNormNetwork(nn.Module):
def __init__(self, im_shape, num_output_classes, args, device, use_bn, num_stages=None, use_bias=True,
meta_classifier=True):
"""
Builds a multilayer convolutional network. It also provides functionality for passing external parameters to be
used at inference time. Enables inner loop optimization readily.
:param im_shape: The input image batch shape.
:param num_output_classes: The number of output classes of the network.
:param args: A named tuple containing the system's hyperparameters.
:param device: The device to run this on.
:param meta_classifier: A flag indicating whether the system's meta-learning (inner-loop) functionalities should
be enabled.
"""
super(FCCActivationNormNetwork, self).__init__()
self.device = device
self.args = args
self.input_shape = list(im_shape)
self.num_output_classes = num_output_classes
self.meta_classifier = meta_classifier
self.num_stages = num_stages
self.use_bias = use_bias
self.use_bn = use_bn
self.build_network()
def build_network(self):
"""
        Builds the network before inference is required by creating some dummy inputs with the same shape as
        self.input_shape. It then passes them through the network, dynamically computes input shapes and
        sets output shapes for each layer.
"""
x = torch.zeros(self.input_shape)
out = x
out = out.view(out.size(0), -1)
self.layer_dict = nn.ModuleDict()
for i in range(self.num_stages):
self.layer_dict['fcc_{}'.format(i)] = MetaLinearLayer(input_shape=out.shape, num_filters=40, use_bias=False)
out = self.layer_dict['fcc_{}'.format(i)].forward(out)
if self.use_bn:
                # NOTE: assumes self.args carries the inner-loop step counts used elsewhere in this module;
                # MetaBatchNormLayer does not accept an `args` argument.
                self.layer_dict['fcc_bn_{}'.format(i)] = MetaBatchNormLayer(num_features=out.shape[1],
                                                                             num_support_set_steps=self.args.num_support_set_steps,
                                                                             num_target_set_steps=self.args.num_target_set_steps,
                                                                             use_per_step_bn_statistics=True)
out = self.layer_dict['fcc_bn_{}'.format(i)].forward(out, num_step=0)
out = F.leaky_relu(out)
out = out.view(out.shape[0], -1)
self.layer_dict['preds_linear'] = MetaLinearLayer(input_shape=(out.shape[0], np.prod(out.shape[1:])),
num_filters=self.num_output_classes, use_bias=self.use_bias)
out = self.layer_dict['preds_linear'](out)
print("FCCActivationNormNetwork build", out.shape)
def forward(self, x, num_step, params=None, training=False,
backup_running_statistics=False, return_features=False):
"""
        Forward propagates through the network. If any params are passed then they are used instead of stored params.
:param x: Input image batch.
:param num_step: The current inner loop step number
:param params: If params are None then internal parameters are used. If params are a dictionary with keys the
same as the layer names then they will be used instead.
:param training: Whether this is training (True) or eval time.
        :param backup_running_statistics: Whether to backup the running statistics in their backup store, which is
        then used to reset the stats back to a previous state (usually after an eval loop, when we want to throw away stored statistics).
:return: Logits of shape b, num_output_classes.
"""
param_dict = dict()
if params is not None:
params = {key: value[0] for key, value in params.items()}
param_dict = extract_top_level_dict(current_dict=params)
for name, param in list(self.layer_dict.named_parameters()) + list(self.layer_dict.items()):
path_bits = name.split(".")
layer_name = path_bits[0]
if layer_name not in param_dict:
param_dict[layer_name] = None
out = x
out = out.view(out.size(0), -1)
for i in range(self.num_stages):
out = self.layer_dict['fcc_{}'.format(i)](out, params=param_dict['fcc_{}'.format(i)])
if self.use_bn:
                out = self.layer_dict['fcc_bn_{}'.format(i)].forward(out, num_step=num_step, training=training,
                                                                      backup_running_statistics=backup_running_statistics)
out = F.leaky_relu(out)
features = out
out = out.view(out.size(0), -1)
out = self.layer_dict['preds_linear'](out, param_dict['preds_linear'])
if return_features:
return out, features
else:
return out
def reset_parameters(self):
for name, module in self.named_modules():
if type(module) == MetaLinearLayer:
# print("reset", name)
module.reset_parameters()
def restore_backup_stats(self):
"""
Reset stored batch statistics from the stored backup.
"""
for name, module in self.named_modules():
if type(module) == MetaBatchNormLayer:
module.restore_backup_stats()
def zero_grad(self, params=None):
if params is None:
for param in self.parameters():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
print(param.grad)
param.grad.zero_()
else:
for name, param in params.items():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
print(param.grad)
param.grad.zero_()
params[name].grad = None
class SqueezeExciteLayer(nn.ModuleDict):
def __init__(self, input_shape, num_filters, num_layers, num_support_set_steps, num_target_set_steps):
super(SqueezeExciteLayer, self).__init__()
self.input_shape = input_shape
self.num_filters = num_filters
self.num_layers = num_layers
self.num_support_set_steps = num_support_set_steps
self.num_target_set_steps = num_target_set_steps
self.build_block()
def build_block(self):
self.layer_dict = nn.ModuleDict()
x_dummy = torch.zeros(self.input_shape)
out = x_dummy
out = F.avg_pool2d(out, out.shape[-1]).squeeze()
for i in range(self.num_layers - 1):
self.layer_dict['attention_network_hidden_{}'.format(i)] = MetaLinearLayer(input_shape=out.shape,
use_bias=True,
num_filters=self.num_filters)
out = self.layer_dict['attention_network_hidden_{}'.format(i)].forward(out, params=None)
self.layer_dict['LeakyReLU_{}'.format(i)] = nn.LeakyReLU()
out = self.layer_dict['LeakyReLU_{}'.format(i)].forward(out)
self.layer_dict['attention_network_output_layer'] = MetaLinearLayer(input_shape=out.shape, use_bias=True,
num_filters=x_dummy.shape[1])
channel_wise_attention_regions = self.layer_dict[
'attention_network_output_layer'].forward(
out, params=None)
        channel_wise_attention_regions = torch.sigmoid(channel_wise_attention_regions)
out = x_dummy * channel_wise_attention_regions.unsqueeze(2).unsqueeze(2)
print('Built', type(self), 'with output', out.shape, self)
def forward(self, x, num_step=0, params=None):
param_dict = dict()
if params is not None:
params = {key: value for key, value in params.items()}
param_dict = extract_top_level_dict(current_dict=params)
for name, param in list(self.layer_dict.named_parameters()) + list(self.layer_dict.items()):
path_bits = name.split(".")
layer_name = path_bits[0]
if layer_name not in param_dict:
param_dict[layer_name] = None
out = x
out = F.avg_pool2d(out, out.shape[-1]).squeeze()
for i in range(self.num_layers - 1):
# print(out.shape)
out = self.layer_dict[
'attention_network_hidden_{}'.format(i)].forward(
out, params=param_dict['attention_network_hidden_{}'.format(i)])
out = self.layer_dict['LeakyReLU_{}'.format(i)].forward(out)
# print(out.shape)
channel_wise_attention_regions = self.layer_dict[
'attention_network_output_layer'].forward(
out, params=param_dict['attention_network_output_layer'])
        channel_wise_attention_regions = torch.sigmoid(channel_wise_attention_regions)
out = x * channel_wise_attention_regions.unsqueeze(2).unsqueeze(2)
return out
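# A short sketch (batch/channel sizes are illustrative) of the squeeze-and-excite block: the
# feature map is average-pooled to a channel vector, passed through a small MLP, squashed to
# [0, 1] and used to rescale the input channels, so the output keeps the input's shape.
def _demo_squeeze_excite():
    se = SqueezeExciteLayer(input_shape=(2, 16, 8, 8), num_filters=8, num_layers=2,
                            num_support_set_steps=0, num_target_set_steps=0)
    x = torch.randn(2, 16, 8, 8)
    out = se(x)  # same shape as x, channels rescaled by the learned attention vector
    return out.shape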
class VGGActivationNormNetworkWithAttention(nn.Module):
def __init__(self, input_shape, num_output_classes, use_channel_wise_attention,
num_stages, num_filters, num_support_set_steps, num_target_set_steps, num_blocks_per_stage):
"""
Builds a multilayer convolutional network. It also provides functionality for passing external parameters to be
used at inference time. Enables inner loop optimization readily.
        :param input_shape: The input image batch shape.
        :param num_output_classes: The number of output classes of the network.
        :param use_channel_wise_attention: Whether to apply channel-wise (squeeze-excite) attention.
        :param num_stages: Number of convolutional stages.
        :param num_filters: Number of filters per convolutional layer.
        :param num_support_set_steps: Number of inner-loop support set steps (used for per-step batch norm statistics).
        :param num_target_set_steps: Number of inner-loop target set steps (used for per-step batch norm statistics).
        :param num_blocks_per_stage: Number of conv blocks within each stage.
"""
super(VGGActivationNormNetworkWithAttention, self).__init__()
self.total_layers = 0
self.upscale_shapes = []
self.num_filters = num_filters
self.num_stages = num_stages
self.input_shape = input_shape
self.use_channel_wise_attention = use_channel_wise_attention
self.num_output_classes = num_output_classes
self.num_blocks_per_stage = num_blocks_per_stage
self.num_support_set_steps = num_support_set_steps
self.num_target_set_steps = num_target_set_steps
self.build_network()
def build_network(self):
"""
        Builds the network before inference is required by creating some dummy inputs with the same shape as
        self.input_shape. It then passes them through the network, dynamically computes input shapes and
        sets output shapes for each layer.
"""
x = torch.zeros(self.input_shape)
out = x
self.layer_dict = nn.ModuleDict()
for i in range(self.num_stages):
for j in range(self.num_blocks_per_stage):
if self.use_channel_wise_attention:
self.layer_dict['attention_layer_{}_{}'.format(i, j)] = SqueezeExciteLayer(input_shape=out.shape,
num_filters=0,
num_layers=0,
num_support_set_steps=self.num_support_set_steps,
num_target_set_steps=self.num_target_set_steps)
out = self.layer_dict['attention_layer_{}_{}'.format(i, j)].forward(out)
self.layer_dict['conv_{}_{}'.format(i, j)] = MetaConvNormLayerLeakyReLU(input_shape=out.shape,
num_filters=self.num_filters,
kernel_size=3, stride=1,
padding=1,
use_bias=True,
groups=1,
per_step_bn_statistics=True,
num_support_set_steps=self.num_support_set_steps,
num_target_set_steps=self.num_target_set_steps)
out = self.layer_dict['conv_{}_{}'.format(i, j)](out, training=True, num_step=0)
out = F.max_pool2d(input=out, kernel_size=(2, 2), stride=2, padding=0)
if self.use_channel_wise_attention:
self.layer_dict['attention_pre_logit_layer'] = SqueezeExciteLayer(input_shape=out.shape,
num_filters=0,
num_layers=0,
num_support_set_steps=self.num_support_set_steps,
num_target_set_steps=self.num_target_set_steps)
out = self.layer_dict['attention_pre_logit_layer'].forward(out)
features_avg = F.avg_pool2d(out, out.shape[-1]).squeeze()
out = features_avg
self.layer_dict['linear'] = MetaLinearLayer(input_shape=out.shape,
num_filters=self.num_output_classes, use_bias=True)
out = self.layer_dict['linear'](out)
print("VGGNetwork build", out.shape)
def forward(self, x, num_step, dropout_training=None, params=None, training=False,
backup_running_statistics=False, return_features=False):
"""
        Forward propagates through the network. If any params are passed then they are used instead of stored params.
:param x: Input image batch.
:param num_step: The current inner loop step number
:param params: If params are None then internal parameters are used. If params are a dictionary with keys the
same as the layer names then they will be used instead.
:param training: Whether this is training (True) or eval time.
        :param backup_running_statistics: Whether to backup the running statistics in their backup store, which is
        then used to reset the stats back to a previous state (usually after an eval loop, when we want to throw away stored statistics).
:return: Logits of shape b, num_output_classes.
"""
param_dict = dict()
if params is not None:
params = {key: value[0] for key, value in params.items()}
# print([key for key, value in param_dict.items()])
param_dict = extract_top_level_dict(current_dict=params)
for name, param in list(self.layer_dict.named_parameters()) + list(self.layer_dict.items()):
path_bits = name.split(".")
layer_name = path_bits[0]
if layer_name not in param_dict:
param_dict[layer_name] = None
out = x
# print([key for key, value in param_dict.items() if value is not None])
for i in range(self.num_stages):
for j in range(self.num_blocks_per_stage):
if self.use_channel_wise_attention:
out = self.layer_dict['attention_layer_{}_{}'.format(i, j)].forward(out, num_step=num_step,
params=param_dict[
'attention_layer_{}_{}'.format(
i, j)])
out = self.layer_dict['conv_{}_{}'.format(i, j)](out, training=True, num_step=num_step,
params=param_dict['conv_{}_{}'.format(i, j)])
out = F.max_pool2d(input=out, kernel_size=(2, 2), stride=2, padding=0)
if self.use_channel_wise_attention:
out = self.layer_dict['attention_pre_logit_layer'].forward(out, params=param_dict[
'attention_pre_logit_layer'])
features = out
features_avg = F.avg_pool2d(out, out.shape[-1]).squeeze()
# out = F.avg_pool2d(out, out.shape[-1])
# out = self.layer_dict['relational_pool'].forward(out, params=param_dict['relational_pool'], num_step=num_step)
out = features_avg
out = self.layer_dict['linear'](out, param_dict['linear'])
if return_features:
return out, features
else:
return out
def restore_backup_stats(self):
"""
Reset stored batch statistics from the stored backup.
"""
for name, module in self.named_modules():
if type(module) == MetaBatchNormLayer:
module.restore_backup_stats()
def zero_grad(self, params=None):
if params is None:
for param in self.parameters():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
print(param.grad)
param.grad.zero_()
else:
for name, param in params.items():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
print(param.grad)
param.grad.zero_()
params[name].grad = None
class MetaBatchRelationalModule(nn.Module):
def __init__(self, input_shape, use_coordinates=True, num_support_set_steps=0, num_target_set_steps=0,
output_units=32):
super(MetaBatchRelationalModule, self).__init__()
self.input_shape = input_shape
self.layer_dict = nn.ModuleDict()
self.first_time = True
self.use_coordinates = use_coordinates
self.num_target_set_steps = num_target_set_steps
self.num_support_set_steps = num_support_set_steps
self.output_units = output_units
self.build_block()
def build_block(self):
out_img = torch.zeros(self.input_shape)
"""g"""
if len(out_img.shape) > 3:
b, c, h, w = out_img.shape
if h > 5:
out_img = F.adaptive_avg_pool2d(out_img, output_size=5)
print(out_img.shape)
b, c, h, w = out_img.shape
out_img = out_img.view(b, c, h * w)
out_img = out_img.permute([0, 2, 1]) # h*w, c
b, length, c = out_img.shape
print(out_img.shape)
# x_flat = (64 x 25 x 24)
if self.use_coordinates:
self.coord_tensor = []
for i in range(length):
self.coord_tensor.append(torch.Tensor(np.array([i])))
self.coord_tensor = torch.stack(self.coord_tensor, dim=0).unsqueeze(0)
if self.coord_tensor.shape[0] != out_img.shape[0]:
self.coord_tensor = self.coord_tensor[0].unsqueeze(0).repeat([out_img.shape[0], 1, 1])
out_img = torch.cat([out_img, self.coord_tensor], dim=2)
x_i = torch.unsqueeze(out_img, 1) # (1xh*wxc)
x_i = x_i.repeat(1, length, 1, 1) # (h*wxh*wxc)
x_j = torch.unsqueeze(out_img, 2) # (h*wx1xc)
x_j = x_j.repeat(1, 1, length, 1) # (h*wxh*wxc)
# concatenate all together
per_location_feature = torch.cat([x_i, x_j], 3) # (h*wxh*wx2*c)
out = per_location_feature.view(
per_location_feature.shape[0] * per_location_feature.shape[1] * per_location_feature.shape[2],
per_location_feature.shape[3])
# print(out.shape)
for idx_layer in range(2):
# print('test', out.shape)
self.layer_dict['g_fcc_{}'.format(idx_layer)] = MetaLinearLayer(input_shape=out.shape, num_filters=64,
use_bias=True)
out = self.layer_dict['g_fcc_{}'.format(idx_layer)].forward(out)
self.layer_dict['LeakyReLU_{}'.format(idx_layer)] = nn.LeakyReLU()
out = self.layer_dict['LeakyReLU_{}'.format(idx_layer)].forward(out)
# reshape again and sum
print(out.shape)
out = out.view(per_location_feature.shape[0], per_location_feature.shape[1], per_location_feature.shape[2], -1)
out = out.sum(1).sum(1)
print('here', out.shape)
"""f"""
self.layer_dict['post_processing_layer'] = MetaLinearLayer(input_shape=out.shape, num_filters=64, use_bias=True)
out = self.layer_dict['post_processing_layer'].forward(out)
self.layer_dict['LeakyReLU_post_processing'] = nn.LeakyReLU()
out = self.layer_dict['LeakyReLU_post_processing'].forward(out)
self.layer_dict['output_layer'] = MetaLinearLayer(input_shape=out.shape, num_filters=self.output_units,
use_bias=True)
out = self.layer_dict['output_layer'].forward(out)
self.layer_dict['LeakyReLU_output'] = nn.LeakyReLU()
out = self.layer_dict['LeakyReLU_output'].forward(out)
print('Block built with output volume shape', out.shape)
def forward(self, x_img, num_step, params=None):
param_dict = dict()
if params is not None:
params = {key: value for key, value in params.items()}
# print([key for key, value in param_dict.items()])
param_dict = extract_top_level_dict(current_dict=params)
# print(list(params.keys()))
for name, param in list(self.layer_dict.named_parameters()) + list(self.layer_dict.items()):
path_bits = name.split(".")
layer_name = path_bits[0]
if layer_name not in param_dict:
param_dict[layer_name] = None
out_img = x_img
# print("input", out_img.shape)
"""g"""
if len(out_img.shape) > 3:
b, c, h, w = out_img.shape
if h > 5:
out_img = F.adaptive_avg_pool2d(out_img, output_size=5)
b, c, h, w = out_img.shape
out_img = out_img.view(b, c, h * w)
out_img = out_img.permute([0, 2, 1]) # h*w, c
b, length, c = out_img.shape
if self.use_coordinates:
if self.coord_tensor.shape[0] != out_img.shape[0]:
self.coord_tensor = self.coord_tensor[0].unsqueeze(0).repeat([out_img.shape[0], 1, 1])
out_img = torch.cat([out_img, self.coord_tensor.to(x_img.device)], dim=2)
# x_flat = (64 x 25 x 24)
# print('out_img', out_img.shape)
x_i = torch.unsqueeze(out_img, 1) # (1xh*wxc)
x_i = x_i.repeat(1, length, 1, 1) # (h*wxh*wxc)
x_j = torch.unsqueeze(out_img, 2) # (h*wx1xc)
x_j = x_j.repeat(1, 1, length, 1) # (h*wxh*wxc)
# concatenate all together
per_location_feature = torch.cat([x_i, x_j], 3) # (h*wxh*wx2*c)
out = per_location_feature.view(
per_location_feature.shape[0] * per_location_feature.shape[1] * per_location_feature.shape[2],
per_location_feature.shape[3])
# print(out.shape)
for idx_layer in range(2):
# print('test', out.shape)
# print(param_dict['g_fcc_{}'.format(idx_layer)])
out = self.layer_dict['g_fcc_{}'.format(idx_layer)].forward(out,
params=param_dict['g_fcc_{}'.format(idx_layer)])
# print('test', out.shape)
out = self.layer_dict['LeakyReLU_{}'.format(idx_layer)].forward(out)
# reshape again and sum
# print(out.shape)
out = out.view(per_location_feature.shape[0], per_location_feature.shape[1], per_location_feature.shape[2], -1)
out = out.sum(1).sum(1)
"""f"""
out = self.layer_dict['post_processing_layer'].forward(out, params=param_dict['post_processing_layer'])
out = self.layer_dict['LeakyReLU_post_processing'].forward(out)
out = self.layer_dict['output_layer'].forward(out, params=param_dict['output_layer'])
out = self.layer_dict['LeakyReLU_output'].forward(out)
return out
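# A hedged usage sketch (tensor sizes are placeholders): the relational module flattens the
# feature map into h*w "objects", compares every location with every other one, and pools the
# pairwise features into a single vector of output_units per example.
def _demo_relational_module():
    rel = MetaBatchRelationalModule(input_shape=(2, 16, 5, 5), use_coordinates=True,
                                    output_units=32)
    x = torch.randn(2, 16, 5, 5)
    out = rel(x, num_step=0)  # shape (2, 32)
    return out.shape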
|
1626502
|
import pytest
def test_blurhash_decode(api):
fake_media_dict = {
'width': 320,
'height': 240,
'blurhash': '=~NdOWof1PbIPUXSvgbI$f'
}
decoded_image = api.decode_blurhash(fake_media_dict)
assert len(decoded_image) == 9 * 16
assert len(decoded_image[0]) == 16
decoded_image_2 = api.decode_blurhash(
fake_media_dict,
out_size = (fake_media_dict["width"], fake_media_dict["height"]),
size_per_component = False,
return_linear = False
)
assert len(decoded_image_2) == 240
assert len(decoded_image_2[0]) == 320
|
1626505
|
try:
import OpenGL.GL as gl
except:
from galry import log_warn
log_warn(("PyOpenGL is not available and Galry won't be"
" able to render plots."))
class _gl(object):
def mock(*args, **kwargs):
return None
def __getattr__(self, name):
return self.mock
gl = _gl()
from collections import OrderedDict
import numpy as np
import sys
from galry import enforce_dtype, DataNormalizer, log_info, log_debug, \
log_warn, RefVar
__all__ = ['GLVersion', 'GLRenderer']
# GLVersion class
# ---------------
class GLVersion(object):
"""Methods related to the GL version."""
# self.version_header = '#version 120'
# self.precision_header = 'precision mediump float;'
@staticmethod
def get_renderer_info():
"""Return information about the client renderer.
Arguments:
* info: a dictionary with the following keys:
* renderer_name
* opengl_version
* glsl_version
"""
return {
'renderer_name': gl.glGetString(gl.GL_RENDERER),
'opengl_version': gl.glGetString(gl.GL_VERSION),
'glsl_version': gl.glGetString(gl.GL_SHADING_LANGUAGE_VERSION)
}
@staticmethod
def version_header():
if GLVersion.get_renderer_info()['opengl_version'][0:3] < '2.1':
return '#version 110\n'
else:
return '#version 120\n'
@staticmethod
def precision_header():
if GLVersion.get_renderer_info()['glsl_version'] >= '1.3':
return 'precision mediump float;'
else:
return ''
# Low-level OpenGL functions to initialize/load variables
# -------------------------------------------------------
class Attribute(object):
"""Contains OpenGL functions related to attributes."""
@staticmethod
def create():
"""Create a new buffer and return a `buffer` index."""
return gl.glGenBuffers(1)
@staticmethod
def get_gltype(index=False):
if not index:
return gl.GL_ARRAY_BUFFER
else:
return gl.GL_ELEMENT_ARRAY_BUFFER
@staticmethod
def bind(buffer, location=None, index=False):
"""Bind a buffer and associate a given location."""
gltype = Attribute.get_gltype(index)
gl.glBindBuffer(gltype, buffer)
if location >= 0:
gl.glEnableVertexAttribArray(location)
@staticmethod
def set_attribute(location, ndim):
"""Specify the type of the attribute before rendering."""
gl.glVertexAttribPointer(location, ndim, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
@staticmethod
def convert_data(data, index=False):
"""Force 32-bit floating point numbers for data."""
if not index:
return enforce_dtype(data, np.float32)
else:
return np.array(data, np.int32)
@staticmethod
def load(data, index=False):
"""Load data in the buffer for the first time. The buffer must
have been bound before."""
data = Attribute.convert_data(data, index=index)
gltype = Attribute.get_gltype(index)
gl.glBufferData(gltype, data, gl.GL_DYNAMIC_DRAW)
@staticmethod
def update(data, onset=0, index=False):
"""Update data in the currently bound buffer."""
gltype = Attribute.get_gltype(index)
data = Attribute.convert_data(data, index=index)
# convert onset into bytes count
if data.ndim == 1:
ndim = 1
elif data.ndim == 2:
ndim = data.shape[1]
onset *= ndim * data.itemsize
gl.glBufferSubData(gltype, int(onset), data)
@staticmethod
def delete(*buffers):
"""Delete buffers."""
if buffers:
gl.glDeleteBuffers(len(buffers), buffers)
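# A hedged sketch of the vertex buffer lifecycle these helpers wrap. It assumes an OpenGL
# context is already current (e.g. inside a GL widget's paint method), that `location` comes
# from ShaderManager.get_attribute_location, and that the data arrays are (N, ndim) NumPy
# arrays; none of these objects are created here.
def _demo_attribute_lifecycle(location, positions, new_positions):
    buf = Attribute.create()
    Attribute.bind(buf, location)
    Attribute.load(positions)                          # initial upload
    Attribute.set_attribute(location, positions.shape[1])
    # ... draw calls happen here ...
    Attribute.bind(buf, location)
    Attribute.update(new_positions, onset=0)           # partial/in-place update
    Attribute.delete(buf)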
class Uniform(object):
"""Contains OpenGL functions related to uniforms."""
float_suffix = {True: 'f', False: 'i'}
array_suffix = {True: 'v', False: ''}
# glUniform[Matrix]D[f][v]
@staticmethod
def convert_data(data):
if isinstance(data, np.ndarray):
data = enforce_dtype(data, np.float32)
if type(data) == np.float64:
data = np.float32(data)
if type(data) == np.int64:
data = np.int32(data)
if type(data) == list:
data = map(Uniform.convert_data, data)
if type(data) == tuple:
data = tuple(map(Uniform.convert_data, data))
return data
@staticmethod
def load_scalar(location, data):
data = Uniform.convert_data(data)
is_float = (type(data) == float) or (type(data) == np.float32)
funname = 'glUniform1%s' % Uniform.float_suffix[is_float]
getattr(gl, funname)(location, data)
@staticmethod
def load_vector(location, data):
if len(data) > 0:
data = Uniform.convert_data(data)
is_float = (type(data[0]) == float) or (type(data[0]) == np.float32)
ndim = len(data)
funname = 'glUniform%d%s' % (ndim, Uniform.float_suffix[is_float])
getattr(gl, funname)(location, *data)
@staticmethod
def load_array(location, data):
data = Uniform.convert_data(data)
is_float = (data.dtype == np.float32)
size, ndim = data.shape
funname = 'glUniform%d%sv' % (ndim, Uniform.float_suffix[is_float])
getattr(gl, funname)(location, size, data)
@staticmethod
def load_matrix(location, data):
data = Uniform.convert_data(data)
is_float = (data.dtype == np.float32)
n, m = data.shape
# TODO: arrays of matrices?
if n == m:
funname = 'glUniformMatrix%d%sv' % (n, Uniform.float_suffix[is_float])
else:
funname = 'glUniformMatrix%dx%d%sv' % (n, m, Uniform.float_suffix[is_float])
getattr(gl, funname)(location, 1, False, data)
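# A small sketch (uniform names and values are illustrative) of picking the right loader from
# the Uniform helpers based on the Python value: scalars, short vectors and square matrices
# each map to a different glUniform* call. A current OpenGL context and a linked ShaderManager
# instance (`shaders`) are assumed.
def _demo_load_uniforms(shaders):
    Uniform.load_scalar(shaders.get_uniform_location('point_size'), 4.0)
    Uniform.load_vector(shaders.get_uniform_location('color'), (1.0, 0.0, 0.0, 1.0))
    Uniform.load_matrix(shaders.get_uniform_location('transform'),
                        np.eye(4, dtype=np.float32))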
class Texture(object):
"""Contains OpenGL functions related to textures."""
@staticmethod
def create(ndim=2, mipmap=False, minfilter=None, magfilter=None):
"""Create a texture with the specifyed number of dimensions."""
buffer = gl.glGenTextures(1)
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
Texture.bind(buffer, ndim)
textype = getattr(gl, "GL_TEXTURE_%dD" % ndim)
gl.glTexParameteri(textype, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP)
gl.glTexParameteri(textype, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP)
if mipmap:
if hasattr(gl, 'glGenerateMipmap'):
gl.glGenerateMipmap(textype)
else:
minfilter = 'NEAREST'
magfilter = 'NEAREST'
if minfilter is None:
minfilter = 'NEAREST'
if magfilter is None:
magfilter = 'NEAREST'
minfilter = getattr(gl, 'GL_' + minfilter)
magfilter = getattr(gl, 'GL_' + magfilter)
gl.glTexParameteri(textype, gl.GL_TEXTURE_MIN_FILTER, minfilter)
gl.glTexParameteri(textype, gl.GL_TEXTURE_MAG_FILTER, magfilter)
return buffer
@staticmethod
def bind(buffer, ndim):
"""Bind a texture buffer."""
textype = getattr(gl, "GL_TEXTURE_%dD" % ndim)
gl.glBindTexture(textype, buffer)
@staticmethod
def get_info(data):
"""Return information about texture data."""
# find shape, ndim, ncomponents
shape = data.shape
if shape[0] == 1:
ndim = 1
elif shape[0] > 1:
ndim = 2
# ndim = 2
ncomponents = shape[2]
# ncomponents==1 ==> GL_R, 3 ==> GL_RGB, 4 ==> GL_RGBA
component_type = getattr(gl, ["GL_INTENSITY8", None, "GL_RGB", "GL_RGBA"] \
[ncomponents - 1])
return ndim, ncomponents, component_type
@staticmethod
def convert_data(data):
"""convert data in a array of uint8 in [0, 255]."""
if data.dtype == np.float32 or data.dtype == np.float64:
return np.array(255 * data, dtype=np.uint8)
elif data.dtype == np.uint8:
return data
else:
raise ValueError("The texture is in an unsupported format.")
@staticmethod
def copy(fbo, tex_src, tex_dst, width, height):
# /// bind the FBO
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
# /// attach the source texture to the fbo
gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0,
gl.GL_TEXTURE_2D, tex_src, 0)
# /// bind the destination texture
gl.glBindTexture(gl.GL_TEXTURE_2D, tex_dst)
# /// copy from framebuffer (here, the FBO!) to the bound texture
gl.glCopyTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, 0, 0, width, height)
# /// unbind the FBO
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
# # ncomponents==1 ==> GL_R, 3 ==> GL_RGB, 4 ==> GL_RGBA
# component_type = getattr(gl, ["GL_INTENSITY8", None, "GL_RGB", "GL_RGBA"] \
# [ncomponents - 1])
# gl.glCopyTexImage2D(gl.GL_TEXTURE_2D,
# 0, # level
# component_type,
# 0, 0, # x, y offsets
# 0, 0, # x, y
# w, h, # width, height
# 0 # border
# )
# @staticmethod
# def read_buffer(index=0):
# gl.glReadBuffer(getattr(gl, 'GL_COLOR_ATTACHMENT%d' % index))
# @staticmethod
# def draw_buffer():
# gl.glDrawBuffer(gl.GL_FRONT)
@staticmethod
def load(data):
"""Load texture data in a bound texture buffer."""
        # convert data into an array of uint8 in [0, 255]
data = Texture.convert_data(data)
shape = data.shape
# get texture info
ndim, ncomponents, component_type = Texture.get_info(data)
textype = getattr(gl, "GL_TEXTURE_%dD" % ndim)
# print ndim, shape, data.shape
# load data in the buffer
if ndim == 1:
gl.glTexImage1D(textype, 0, component_type, shape[1], 0, component_type,
gl.GL_UNSIGNED_BYTE, data)
elif ndim == 2:
# width, height == shape[1], shape[0]: Thanks to the Confusion Club
gl.glTexImage2D(textype, 0, component_type, shape[1], shape[0], 0,
component_type, gl.GL_UNSIGNED_BYTE, data)
@staticmethod
def update(data):
"""Update a texture."""
        # convert data into an array of uint8 in [0, 255]
data = Texture.convert_data(data)
shape = data.shape
# get texture info
ndim, ncomponents, component_type = Texture.get_info(data)
textype = getattr(gl, "GL_TEXTURE_%dD" % ndim)
# update buffer
if ndim == 1:
gl.glTexSubImage1D(textype, 0, 0, shape[1],
component_type, gl.GL_UNSIGNED_BYTE, data)
elif ndim == 2:
gl.glTexSubImage2D(textype, 0, 0, 0, shape[1], shape[0],
component_type, gl.GL_UNSIGNED_BYTE, data)
@staticmethod
def delete(*buffers):
"""Delete texture buffers."""
gl.glDeleteTextures(buffers)
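# A hedged sketch of the texture helpers: an OpenGL context is assumed, and `image` is expected
# to be an (h, w, 3) or (h, w, 4) array of uint8 or float values in [0, 1]. Create the texture
# once, upload the pixels, and push new pixels later with update.
def _demo_texture_lifecycle(image):
    tex = Texture.create(ndim=2)
    Texture.bind(tex, ndim=2)
    Texture.load(image)
    # ... later, with the same texture bound ...
    Texture.update(image)
    Texture.delete(tex)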
class FrameBuffer(object):
"""Contains OpenGL functions related to FBO."""
@staticmethod
def create():
"""Create a FBO."""
if hasattr(gl, 'glGenFramebuffers') and gl.glGenFramebuffers:
buffer = gl.glGenFramebuffers(1)
else:
buffer = None
return buffer
@staticmethod
def bind(buffer=None):
"""Bind a FBO."""
if buffer is None:
buffer = 0
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, buffer)
@staticmethod
def bind_texture(texture, i=0):
gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER,
getattr(gl, 'GL_COLOR_ATTACHMENT%d' % i),
gl.GL_TEXTURE_2D, texture, 0)
@staticmethod
def draw_buffers(n):
gl.glDrawBuffers([getattr(gl, 'GL_COLOR_ATTACHMENT%d' % i) for i in xrange(n)])
@staticmethod
def unbind():
"""Unbind a FBO."""
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
# Shader manager
# --------------
class ShaderManager(object):
"""Handle vertex and fragment shaders.
TODO: integrate in the renderer the shader code creation module.
"""
# Initialization methods
# ----------------------
def __init__(self, vertex_shader, fragment_shader):
"""Compile shaders and create a program."""
# add headers
vertex_shader = GLVersion.version_header() + vertex_shader
fragment_shader = GLVersion.version_header() + fragment_shader
# set shader source
self.vertex_shader = vertex_shader
self.fragment_shader = fragment_shader
# compile shaders
self.compile()
# create program
self.program = self.create_program()
def compile_shader(self, source, shader_type):
"""Compile a shader (vertex or fragment shader).
Arguments:
* source: the shader source code as a string.
* shader_type: either gl.GL_VERTEX_SHADER or gl.GL_FRAGMENT_SHADER.
"""
# compile shader
shader = gl.glCreateShader(shader_type)
gl.glShaderSource(shader, source)
gl.glCompileShader(shader)
result = gl.glGetShaderiv(shader, gl.GL_COMPILE_STATUS)
infolog = gl.glGetShaderInfoLog(shader)
if infolog:
infolog = "\n" + infolog.strip()
# check compilation error
if not(result) and infolog:
msg = "Compilation error for %s." % str(shader_type)
if infolog is not None:
msg += infolog
msg += source
raise RuntimeError(msg)
else:
log_debug("Compilation succeeded for %s.%s" % (str(shader_type), infolog))
return shader
def compile(self):
"""Compile the shaders."""
# print self.vertex_shader
# print self.fragment_shader
self.vs = self.compile_shader(self.vertex_shader, gl.GL_VERTEX_SHADER)
self.fs = self.compile_shader(self.fragment_shader, gl.GL_FRAGMENT_SHADER)
def create_program(self):
"""Create shader program and attach shaders."""
program = gl.glCreateProgram()
gl.glAttachShader(program, self.vs)
gl.glAttachShader(program, self.fs)
gl.glLinkProgram(program)
result = gl.glGetProgramiv(program, gl.GL_LINK_STATUS)
# check linking error
if not(result):
msg = "Shader program linking error:"
info = gl.glGetProgramInfoLog(program)
if info:
msg += info
raise RuntimeError(msg)
self.program = program
return program
def get_attribute_location(self, name):
"""Return the location of an attribute after the shaders have compiled."""
return gl.glGetAttribLocation(self.program, name)
def get_uniform_location(self, name):
"""Return the location of a uniform after the shaders have compiled."""
return gl.glGetUniformLocation(self.program, name)
# Activation methods
# ------------------
def activate_shaders(self):
"""Activate shaders for the rest of the rendering call."""
# try:
gl.glUseProgram(self.program)
# return True
# except Exception as e:
# log_info("Error while activating the shaders: " + e.message)
# return False
def deactivate_shaders(self):
"""Deactivate shaders for the rest of the rendering call."""
# try:
gl.glUseProgram(0)
# return True
# except Exception as e:
# log_info("Error while activating the shaders: " + e.message)
# return True
# Cleanup methods
# ---------------
def detach_shaders(self):
"""Detach shaders from the program."""
if gl.glIsProgram(self.program):
gl.glDetachShader(self.program, self.vs)
gl.glDetachShader(self.program, self.fs)
def delete_shaders(self):
"""Delete the vertex and fragment shaders."""
if gl.glIsProgram(self.program):
gl.glDeleteShader(self.vs)
gl.glDeleteShader(self.fs)
def delete_program(self):
"""Delete the shader program."""
if gl.glIsProgram(self.program):
gl.glDeleteProgram(self.program)
def cleanup(self):
"""Clean up all shaders."""
self.detach_shaders()
self.delete_shaders()
self.delete_program()
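# A hedged end-to-end sketch of ShaderManager (the shader sources are minimal placeholders and
# a current OpenGL context is assumed). The GLSL version header is prepended automatically, the
# program is compiled and linked in the constructor, and locations are then queried by name.
def _demo_shader_manager():
    vs = "attribute vec2 position;\nvoid main() { gl_Position = vec4(position, 0., 1.); }"
    fs = "void main() { gl_FragColor = vec4(1., 1., 1., 1.); }"
    shaders = ShaderManager(vs, fs)
    position_location = shaders.get_attribute_location('position')
    shaders.activate_shaders()
    # ... bind buffers and issue draw calls here ...
    shaders.deactivate_shaders()
    shaders.cleanup()
    return position_location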
# Slicing classes
# ---------------
MAX_VBO_SIZE = 65000
class Slicer(object):
"""Handle attribute slicing, necessary because of the size
of buffer objects which is limited on some GPUs."""
@staticmethod
def _get_slices(size, maxsize=None):
"""Return a list of slices for a given dataset size.
Arguments:
* size: the size of the dataset, i.e. the number of points.
Returns:
* slices: a list of pairs `(position, slice_size)` where `position`
is the position of this slice in the original buffer, and
`slice_size` the slice size.
"""
if maxsize is None:
maxsize = MAX_VBO_SIZE
if maxsize > 0:
nslices = int(np.ceil(size / float(maxsize)))
else:
nslices = 0
return [(i*maxsize, min(maxsize+1, size-i*maxsize)) for i in xrange(nslices)]
@staticmethod
def _slice_bounds(bounds, position, slice_size, regular=False):
"""Slice data bounds in a *single* slice according to the VBOs slicing.
Arguments:
* bounds: the bounds as specified by the user in `create_dataset`.
* position: the position of the current slice.
* slice_size: the size of the current slice.
Returns:
          * bounds_sliced: the bounds for the current slice. It is a
            1D array of integer indices.
"""
# first bound index after the sliced VBO: nothing to paint
if bounds[0] >= position + slice_size:
bounds_sliced = None
# last bound index before the sliced VBO: nothing to paint
elif bounds[-1] < position:
bounds_sliced = None
# the current sliced VBO intersects the bounds: something to paint
else:
bounds_sliced = bounds
if not regular:
# get the bounds that fall within the sliced VBO
ind = (bounds_sliced>=position) & (bounds_sliced<position + slice_size)
bounds_sliced = bounds_sliced[ind]
# HACK: more efficient algorithm when the bounds are regularly
# spaced
else:
d = float(regular)
p = position
b0 = bounds_sliced[0]
b1 = bounds_sliced[-1]
s = slice_size
i0 = max(0, int(np.ceil((p-b0)/d)))
i1 = max(0, int(np.floor((p+s-b0)/d)))
bounds_sliced = bounds_sliced[i0:i1+1].copy()
ind = ((b0 >= p) and (b0 < p+s), (b1 >= p) and (b1 < p+s))
"""
bounds_sliced = [b0 + d*i]
(p-b0)/d <= i0 < (p+s-b0)/d
i0 = ceil((p-b0)/d), i1 = floor((p+s-b0)/d)
ind = (bs[0] >= p & < p+s, bs[-1])
"""
# remove the onset (first index of the sliced VBO)
bounds_sliced -= position
# handle the case when the slice cuts between two bounds
if not ind[0]:
bounds_sliced = np.hstack((0, bounds_sliced))
if not ind[-1]:
bounds_sliced = np.hstack((bounds_sliced, slice_size))
return enforce_dtype(bounds_sliced, np.int32)
def set_size(self, size, doslice=True):
"""Update the total size of the buffer, and update
the slice information accordingly."""
# deactivate slicing by using a maxsize number larger than the
# actual size
if not doslice:
maxsize = 2 * size
else:
maxsize = None
self.size = size
# if not hasattr(self, 'bounds'):
# self.bounds = np.array([0, size], dtype=np.int32)
# compute the data slicing with respect to bounds (specified in the
# template) and to the maximum size of a VBO.
self.slices = self._get_slices(self.size, maxsize)
# print self.size, maxsize
# print self.slices
self.slice_count = len(self.slices)
def set_bounds(self, bounds=None):
"""Update the bound size, and update the slice information
accordingly."""
if bounds is None:
bounds = np.array([0, self.size], dtype=np.int32)
self.bounds = bounds
# is regular?
d = np.diff(bounds)
r = False
if len(d) > 0:
dm, dM = d.min(), d.max()
if dm == dM:
r = dm
# log_info("Regular bounds")
self.subdata_bounds = [self._slice_bounds(self.bounds, pos, size, r) \
for pos, size in self.slices]
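# A short sketch (sizes chosen for illustration) of how the Slicer cuts a large attribute into
# VBO-sized pieces: set_size computes (position, slice_size) pairs bounded by MAX_VBO_SIZE, and
# set_bounds re-expresses the primitive bounds within each slice.
def _demo_slicer():
    slicer = Slicer()
    slicer.set_size(150000)       # -> three slices, each at most MAX_VBO_SIZE (+1 for overlap)
    slicer.set_bounds(np.array([0, 50000, 100000, 150000], dtype=np.int32))
    return slicer.slices, slicer.subdata_bounds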
class SlicedAttribute(object):
"""Encapsulate methods for slicing an attribute and handling several
buffer objects for a single attribute."""
def __init__(self, slicer, location, buffers=None):
self.slicer = slicer
self.location = location
if buffers is None:
# create the sliced buffers
self.create()
else:
log_debug("Creating sliced attribute with existing buffers " +
str(buffers))
# or use existing buffers
self.load_buffers(buffers)
def create(self):
"""Create the sliced buffers."""
self.buffers = [Attribute.create() for _ in self.slicer.slices]
def load_buffers(self, buffers):
"""Load existing buffers instead of creating new ones."""
self.buffers = buffers
def delete_buffers(self):
"""Delete all sub-buffers."""
# for buffer in self.buffers:
Attribute.delete(*self.buffers)
def load(self, data):
"""Load data on all sliced buffers."""
for buffer, (pos, size) in zip(self.buffers, self.slicer.slices):
# WARNING: putting self.location instead of None ==> SEGFAULT on Linux with Nvidia drivers
Attribute.bind(buffer, None)
Attribute.load(data[pos:pos + size,...])
def bind(self, slice=None):
if slice is None:
slice = 0
Attribute.bind(self.buffers[slice], self.location)
def update(self, data, mask=None):
"""Update data on all sliced buffers."""
# NOTE: the slicer needs to be updated if the size of the data changes
# default mask
if mask is None:
mask = np.ones(self.slicer.size, dtype=np.bool)
# is the current subVBO within the given [onset, offset]?
within = False
# update VBOs
for buffer, (pos, size) in zip(self.buffers, self.slicer.slices):
subdata = data[pos:pos + size,...]
submask = mask[pos:pos + size]
# if there is at least one True in the slice mask (submask)
if submask.any():
# this sub-buffer contains updated indices
subonset = submask.argmax()
suboffset = len(submask) - 1 - submask[::-1].argmax()
Attribute.bind(buffer, self.location)
Attribute.update(subdata[subonset:suboffset + 1,...], subonset)
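# Usage sketch (comments only; `location` would come from the shader manager
# and `data` from the visual definition, as done in GLVisualRenderer below):
#   slicer = Slicer()
#   slicer.set_size(data.shape[0])
#   slicer.set_bounds()
#   attr = SlicedAttribute(slicer, location)
#   attr.load(data)    # upload data to every sub-VBO
#   attr.bind(0)       # bind the first slice before issuing a draw call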
# Painter class
# -------------
class Painter(object):
"""Provides low-level methods for calling OpenGL rendering commands."""
@staticmethod
def draw_arrays(primtype, offset, size):
"""Render an array of primitives."""
gl.glDrawArrays(primtype, offset, size)
@staticmethod
def draw_multi_arrays(primtype, bounds):
"""Render several arrays of primitives."""
first = bounds[:-1]
count = np.diff(bounds)
primcount = len(bounds) - 1
gl.glMultiDrawArrays(primtype, first, count, primcount)
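# Example (comment only): bounds = [0, 10, 25] gives first = [0, 10],
# count = [10, 15] and primcount = 2, i.e. two primitives are rendered,
# one over vertices 0..9 and one over vertices 10..24.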
@staticmethod
def draw_indexed_arrays(primtype, size):
gl.glDrawElements(primtype, size, gl.GL_UNSIGNED_INT, None)
# Visual renderer
# ---------------
class GLVisualRenderer(object):
"""Handle rendering of one visual"""
def __init__(self, renderer, visual):
"""Initialize the visual renderer, create the slicer, initialize
all variables and the shaders."""
# register the master renderer (to access to other visual renderers)
# and register the scene dictionary
self.renderer = renderer
self.scene = renderer.scene
# register the visual dictionary
self.visual = visual
self.framebuffer = visual.get('framebuffer', None)
# self.beforeclear = visual.get('beforeclear', None)
# options
self.options = visual.get('options', {})
# hold all data changes until the next rendering pass happens
self.data_updating = {}
self.textures_to_copy = []
# set the primitive type from its name
self.set_primitive_type(self.visual['primitive_type'])
# indexed mode? set in initialize_variables
self.use_index = None
# whether to use slicing? always True, except when neither indexing nor
# slicing should be used
self.use_slice = True
# self.previous_size = None
# set the slicer
self.slicer = Slicer()
# used when slicing needs to be deactivated (like for indexed arrays)
self.noslicer = Slicer()
# get size and bounds
size = self.visual['size']
bounds = np.array(self.visual.get('bounds', [0, size]), np.int32)
# self.update_size(size, bounds)
self.slicer.set_size(size)
self.slicer.set_bounds(bounds)
self.noslicer.set_size(size, doslice=False)
self.noslicer.set_bounds(bounds)
# compile and link the shaders
self.shader_manager = ShaderManager(self.visual['vertex_shader'],
self.visual['fragment_shader'])
# DEBUG
# log_info(self.shader_manager.vertex_shader)
# log_info(self.shader_manager.fragment_shader)
# initialize all variables
# self.initialize_normalizers()
self.initialize_variables()
self.initialize_fbocopy()
self.load_variables()
def set_primitive_type(self, primtype):
"""Set the primitive type from its name (without the GL_ prefix)."""
self.primitive_type = getattr(gl, "GL_%s" % primtype.upper())
def getarg(self, name):
"""Get a visual parameter."""
return self.visual.get(name, None)
# Variable methods
# ----------------
def get_visuals(self):
"""Return all visuals defined in the scene."""
return self.scene['visuals']
def get_visual(self, name):
"""Return a visual dictionary from its name."""
visuals = [v for v in self.get_visuals() if v.get('name', '') == name]
if not visuals:
return None
return visuals[0]
def get_variables(self, shader_type=None):
"""Return all variables defined in the visual."""
if not shader_type:
return self.visual.get('variables', [])
else:
return [var for var in self.get_variables() \
if var['shader_type'] == shader_type]
def get_variable(self, name, visual=None):
"""Return a variable by its name, and for any given visual which
is specified by its name."""
# get the variables list
if visual is None:
variables = self.get_variables()
else:
variables = self.get_visual(visual)['variables']
variables = [v for v in variables if v.get('name', '') == name]
if not variables:
return None
return variables[0]
def resolve_reference(self, refvar):
"""Resolve a reference variable: return its true value (a Numpy array).
"""
return self.get_variable(refvar.variable, visual=refvar.visual)
# Initialization methods
# ----------------------
def initialize_fbocopy(self):
"""Create a FBO used when copying textures."""
self.fbocopy = FrameBuffer.create()
def initialize_variables(self):
"""Initialize all variables, after the shaders have compiled."""
# find out whether indexing is used or not, because in this case
# the slicing needs to be deactivated
if self.get_variables('index'):
# deactivate slicing
self.slicer = self.noslicer
log_debug("deactivating slicing because there's an indexed buffer")
self.use_index = True
else:
self.use_index = False
# initialize all variables
for var in self.get_variables():
shader_type = var['shader_type']
# skip varying
if shader_type == 'varying':
continue
name = var['name']
# call initialize_***(name) to initialize that variable
getattr(self, 'initialize_%s' % shader_type)(name)
# special case for uniforms: need to load them the first time
uniforms = self.get_variables('uniform')
self.set_data(**dict([(v['name'], v.get('data', None)) for v in uniforms]))
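# Dispatch example (comment only): a variable declared with
# shader_type='attribute' and the hypothetical name 'position' is handled by
# the getattr call above as self.initialize_attribute('position').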
def initialize_attribute(self, name):
"""Initialize an attribute: get the shader location, create the
sliced buffers, and load the data."""
# retrieve the location of that attribute in the shader
location = self.shader_manager.get_attribute_location(name)
variable = self.get_variable(name)
variable['location'] = location
# deal with reference attributes: share the same buffers between
# several different visuals
if isinstance(variable.get('data', None), RefVar):
# HACK: if the targeted attribute is indexed, we should
# deactivate slicing here
if self.renderer.visual_renderers[variable['data'].visual].use_index:
log_debug("deactivating slicing")
self.slicer = self.noslicer
# use the existing buffers from the target variable
target = self.resolve_reference(variable['data'])
variable['sliced_attribute'] = SlicedAttribute(self.slicer, location,
buffers=target['sliced_attribute'].buffers)
else:
# initialize the sliced buffers
variable['sliced_attribute'] = SlicedAttribute(self.slicer, location)
def initialize_index(self, name):
variable = self.get_variable(name)
variable['buffer'] = Attribute.create()
def initialize_texture(self, name):
variable = self.get_variable(name)
# handle reference variable to texture
if isinstance(variable.get('data', None), RefVar):
target = self.resolve_reference(variable['data'])
variable['buffer'] = target['buffer']
variable['location'] = target['location']
else:
variable['buffer'] = Texture.create(variable['ndim'],
mipmap=variable.get('mipmap', None),
minfilter=variable.get('minfilter', None),
magfilter=variable.get('magfilter', None),
)
# NEW
# get the location of the sampler uniform
location = self.shader_manager.get_uniform_location(name)
variable['location'] = location
def initialize_framebuffer(self, name):
variable = self.get_variable(name)
variable['buffer'] = FrameBuffer.create()
# bind the frame buffer
FrameBuffer.bind(variable['buffer'])
# variable['texture'] is a list of texture names in the current visual
if isinstance(variable['texture'], basestring):
variable['texture'] = [variable['texture']]
# draw as many buffers as there are textures in that frame buffer
FrameBuffer.draw_buffers(len(variable['texture']))
for i, texname in enumerate(variable['texture']):
# get the texture variable:
texture = self.get_variable(texname)
# link the texture to the frame buffer
FrameBuffer.bind_texture(texture['buffer'], i)
# unbind the frame buffer
FrameBuffer.unbind()
def initialize_uniform(self, name):
"""Initialize an uniform: get the location after the shaders have
been compiled."""
location = self.shader_manager.get_uniform_location(name)
variable = self.get_variable(name)
variable['location'] = location
def initialize_compound(self, name):
pass
# Normalization methods
# ---------------------
# def initialize_normalizers(self):
# self.normalizers = {}
# Loading methods
# ---------------
def load_variables(self):
"""Load data for all variables at initialization."""
for var in self.get_variables():
shader_type = var['shader_type']
# skip uniforms
if shader_type == 'uniform' or shader_type == 'varying' or shader_type == 'framebuffer':
continue
# call load_***(name) to load that variable
getattr(self, 'load_%s' % shader_type)(var['name'])
def load_attribute(self, name, data=None):
"""Load data for an attribute variable."""
variable = self.get_variable(name)
if variable['sliced_attribute'].location < 0:
log_debug(("Variable '%s' could not be loaded, probably because "
"it is not used in the shaders") % name)
return
olddata = variable.get('data', None)
if isinstance(olddata, RefVar):
log_debug("Skipping loading data for attribute '%s' since it "
"references a target variable." % name)
return
if data is None:
data = olddata
if data is not None:
# normalization
# if name in self.options.get('normalizers', {}):
# viewbox = self.options['normalizers'][name]
# if viewbox:
# self.normalizers[name] = DataNormalizer(data)
# # normalize data with the specified viewbox, None by default
# # meaning that the natural bounds of the data are used.
# data = self.normalizers[name].normalize(viewbox)
variable['sliced_attribute'].load(data)
def load_index(self, name, data=None):
"""Load data for an index variable."""
variable = self.get_variable(name)
if data is None:
data = variable.get('data', None)
if data is not None:
self.indexsize = len(data)
Attribute.bind(variable['buffer'], index=True)
Attribute.load(data, index=True)
def load_texture(self, name, data=None):
"""Load data for a texture variable."""
variable = self.get_variable(name)
if variable['buffer'] < 0:
log_debug(("Variable '%s' could not be loaded, probably because "
"it is not used in the shaders") % name)
return
if data is None:
data = variable.get('data', None)
# NEW: update sampler location
self.update_samplers = True
if isinstance(data, RefVar):
log_debug("Skipping loading data for texture '%s' since it "
"references a target variable." % name)
return
if data is not None:
Texture.bind(variable['buffer'], variable['ndim'])
Texture.load(data)
def load_uniform(self, name, data=None):
"""Load data for an uniform variable."""
variable = self.get_variable(name)
location = variable['location']
if location < 0:
log_debug(("Variable '%s' could not be loaded, probably because "
"it is not used in the shaders") % name)
return
if data is None:
data = variable.get('data', None)
if data is not None:
ndim = variable['ndim']
size = variable.get('size', None)
# one value
if not size:
# scalar or vector
if type(ndim) == int or type(ndim) == long:
if ndim == 1:
Uniform.load_scalar(location, data)
else:
Uniform.load_vector(location, data)
# matrix
elif type(ndim) == tuple:
Uniform.load_matrix(location, data)
# array
else:
# scalar or vector
if type(ndim) == int or type(ndim) == long:
Uniform.load_array(location, data)
def load_compound(self, name, data=None):
pass
# Updating methods
# ----------------
def update_variable(self, name, data, **kwargs):
"""Update data of a variable."""
variable = self.get_variable(name)
if variable is None:
log_debug("Variable '%s' was not found, unable to update it." % name)
else:
shader_type = variable['shader_type']
# skip compound, which is handled in set_data
if shader_type == 'compound' or shader_type == 'varying' or shader_type == 'framebuffer':
pass
else:
getattr(self, 'update_%s' % shader_type)(name, data, **kwargs)
def update_attribute(self, name, data):#, bounds=None):
"""Update data for an attribute variable."""
variable = self.get_variable(name)
if variable['sliced_attribute'].location < 0:
log_debug(("Variable '%s' could not be updated, probably because "
"it is not used in the shaders") % name)
return
# handle reference variable
olddata = variable.get('data', None)
if isinstance(olddata, RefVar):
raise ValueError("Unable to load data for a reference " +
"attribute. Use the target variable directly.""")
variable['data'] = data
att = variable['sliced_attribute']
if olddata is None:
oldshape = 0
else:
oldshape = olddata.shape
# print name, oldshape, data.shape
# handle size changing
if data.shape[0] != oldshape[0]:
log_debug(("Creating new buffers for variable %s, old size=%s,"
"new size=%d") % (name, oldshape[0], data.shape[0]))
# update the size only when not using index arrays
if self.use_index:
newsize = self.slicer.size
else:
newsize = data.shape[0]
# update the slicer size and bounds
self.slicer.set_size(newsize, doslice=not(self.use_index))
# HACK: update the bounds only if there are basically no bounds
# (i.e. only 2 bounds), otherwise we assume the bounds have been
# changed explicitly
if len(self.slicer.bounds) == 2:
self.slicer.set_bounds()
# delete old buffers
att.delete_buffers()
# create new buffers
att.create()
# load data
att.load(data)
# forget previous size
# self.previous_size = None
else:
# update data
att.update(data)
def update_index(self, name, data):
"""Update data for a index variable."""
variable = self.get_variable(name)
prevsize = len(variable['data'])
variable['data'] = data
newsize = len(data)
# handle size changing
if newsize != prevsize:
# update the total size (in slicer)
# self.slicer.set_size(newsize, doslice=False)
self.indexsize = newsize
# delete old buffers
Attribute.delete(variable['buffer'])
# create new buffer
variable['buffer'] = Attribute.create()
# load data
Attribute.bind(variable['buffer'], variable['ndim'], index=True)
Attribute.load(data, index=True)
else:
# update data
Attribute.bind(variable['buffer'], variable['ndim'], index=True)
Attribute.update(data, index=True)
def update_texture(self, name, data):
"""Update data for a texture variable."""
variable = self.get_variable(name)
if variable['buffer'] < 0:
log_debug(("Variable '%s' could not be loaded, probably because "
"it is not used in the shaders") % name)
return
prevshape = variable['data'].shape
variable['data'] = data
# handle size changing
if data.shape != prevshape:
# delete old buffers
# Texture.delete(variable['buffer'])
variable['ndim'], variable['ncomponents'], _ = Texture.get_info(data)
# create new buffer
# variable['buffer'] = Texture.create(variable['ndim'],
# mipmap=variable.get('mipmap', None),
# minfilter=variable.get('minfilter', None),
# magfilter=variable.get('magfilter', None),)
# load data
Texture.bind(variable['buffer'], variable['ndim'])
Texture.load(data)
else:
# update data
Texture.bind(variable['buffer'], variable['ndim'])
Texture.update(data)
def update_uniform(self, name, data):
"""Update data for an uniform variable."""
variable = self.get_variable(name)
variable['data'] = data
# the uniform interface is the same for load/update
self.load_uniform(name, data)
special_keywords = ['visible',
'size',
'bounds',
'primitive_type',
'constrain_ratio',
'constrain_navigation',
]
def set_data(self, **kwargs):
"""Load data for the specified visual. Uploading does not happen here
but in `update_all_variables` instead, since this needs to happen
after shader program binding in the paint method.
Arguments:
* **kwargs: the data to update as name:value pairs. name can be
any field of the visual, plus one of the following keywords:
* visible: whether this visual should be visible,
* size: the size of the visual,
* primitive_type: the GL primitive type,
* constrain_ratio: whether to constrain the ratio of the visual,
* constrain_navigation: whether to constrain the navigation,
"""
# handle compound variables
kwargs2 = kwargs.copy()
for name, data in kwargs2.iteritems():
variable = self.get_variable(name)
if variable is None:
# log_info("variable '%s' unknown" % name)
continue
if variable is not None and variable['shader_type'] == 'compound':
fun = variable['fun']
kwargs.pop(name)
# HACK: if the target variable in the compound is a special
# keyword, we update it in kwargs, otherwise we update the
# data in self.data_updating
# print name, fun(data)
# if name in self.special_keywords:
# kwargs.update(**fun(data))
# else:
# self.data_updating.update(**fun(data))
kwargs.update(**fun(data))
# remove non-visible variables
if not variable.get('visible', True):
kwargs.pop(name)
# handle visual visibility
visible = kwargs.pop('visible', None)
if visible is not None:
self.visual['visible'] = visible
# handle size keyword
size = kwargs.pop('size', None)
# print size
if size is not None:
self.slicer.set_size(size)
# handle bounds keyword
bounds = kwargs.pop('bounds', None)
if bounds is not None:
self.slicer.set_bounds(bounds)
# handle primitive type special keyword
primitive_type = kwargs.pop('primitive_type', None)
if primitive_type is not None:
self.visual['primitive_type'] = primitive_type
self.set_primitive_type(primitive_type)
# handle constrain_ratio keyword
constrain_ratio = kwargs.pop('constrain_ratio', None)
if constrain_ratio is not None:
self.visual['constrain_ratio'] = constrain_ratio
# handle constrain_navigation keyword
constrain_navigation = kwargs.pop('constrain_navigation', None)
if constrain_navigation is not None:
self.visual['constrain_navigation'] = constrain_navigation
# flag the other variables as to be updated
self.data_updating.update(**kwargs)
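# Usage sketch (comments only; 'color' is a hypothetical variable name that
# would have to be declared in the visual):
#   visual_renderer.set_data(color=new_colors, visible=True, size=len(new_colors))
# The per-variable data is only staged in self.data_updating here; the actual
# GPU upload happens in update_all_variables(), called from paint().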
def copy_texture(self, tex1, tex2):
self.textures_to_copy.append((tex1, tex2))
def update_all_variables(self):
"""Upload all new data that needs to be updated."""
# # current size, that may change following variable updating
# if not self.previous_size:
# self.previous_size = self.slicer.size
# go through all data changes
for name, data in self.data_updating.iteritems():
if data is not None:
# log_info("Updating variable '%s'" % name)
self.update_variable(name, data)
else:
log_debug("Data for variable '%s' is None" % name)
# reset the data updating dictionary
self.data_updating.clear()
def copy_all_textures(self):
# copy textures
for tex1, tex2 in self.textures_to_copy:
# tex1 = self.get_variable(tex1)
tex1 = self.resolve_reference(tex1)
tex2 = self.get_variable(tex2)
# tex2 = self.resolve_reference(tex2)
# # Texture.read_buffer()
# Texture.bind(tex2['buffer'], tex2['ndim'])
# copy(fbo, tex_src, tex_dst, width, height)
Texture.copy(self.fbocopy, tex1['buffer'], tex2['buffer'],
tex1['shape'][0], tex1['shape'][1])
self.textures_to_copy = []
# Binding methods
# ---------------
def bind_attributes(self, slice=None):
"""Bind all attributes of the visual for the given slice.
This method is used during rendering."""
# find all visual variables with shader type 'attribute'
attributes = self.get_variables('attribute')
# for each attribute, bind the sub buffer corresponding to the given
# slice
for variable in attributes:
loc = variable['location']
if loc < 0:
log_debug(("Unable to bind attribute '%s', probably because "
"it is not used in the shaders.") % variable['name'])
continue
variable['sliced_attribute'].bind(slice)
Attribute.set_attribute(loc, variable['ndim'])
def bind_indices(self):
indices = self.get_variables('index')
for variable in indices:
Attribute.bind(variable['buffer'], index=True)
def bind_textures(self):
"""Bind all textures of the visual.
This method is used during rendering."""
textures = self.get_variables('texture')
for i, variable in enumerate(textures):
buffer = variable.get('buffer', None)
if buffer is not None:
# HACK: we update the sampler values here
if self.update_samplers and not isinstance(variable['data'], RefVar):
Uniform.load_scalar(variable['location'], i)
# NEW
gl.glActiveTexture(getattr(gl, 'GL_TEXTURE%d' % i))
Texture.bind(buffer, variable['ndim'])
else:
log_debug("Texture '%s' was not properly initialized." % \
variable['name'])
# deactivate all textures if there are no textures
if not textures:
Texture.bind(0, 1)
Texture.bind(0, 2)
# no need to update the samplers after the first execution of this
# method
self.update_samplers = False
# Paint methods
# -------------
def paint(self):
"""Paint the visual slice by slice."""
# do not display non-visible visuals
if not self.visual.get('visible', True):
return
# activate the shaders
try:
self.shader_manager.activate_shaders()
# if the shaders could not be successfully activated, stop the
# rendering immediately
except Exception as e:
log_info("Error while activating the shaders: " + str(e))
return
# update all variables
self.update_all_variables()
# bind all textures for that slice
self.bind_textures()
# paint using indices
if self.use_index:
self.bind_attributes()
self.bind_indices()
Painter.draw_indexed_arrays(self.primitive_type, self.indexsize)
# or paint without
elif self.use_slice:
# draw all sliced buffers
for slice in xrange(len(self.slicer.slices)):
# get slice bounds
slice_bounds = self.slicer.subdata_bounds[slice]
# print slice, slice_bounds
# bind all attributes for that slice
self.bind_attributes(slice)
# call the appropriate OpenGL rendering command
# if len(self.slicer.bounds) <= 2:
# print "slice bounds", slice_bounds
if len(slice_bounds) <= 2:
Painter.draw_arrays(self.primitive_type, slice_bounds[0],
slice_bounds[1] - slice_bounds[0])
else:
Painter.draw_multi_arrays(self.primitive_type, slice_bounds)
self.copy_all_textures()
# deactivate the shaders
self.shader_manager.deactivate_shaders()
# Cleanup methods
# ---------------
def cleanup_attribute(self, name):
"""Cleanup a sliced attribute (all sub-buffers)."""
variable = self.get_variable(name)
variable['sliced_attribute'].delete_buffers()
def cleanup_texture(self, name):
"""Cleanup a texture."""
variable = self.get_variable(name)
Texture.delete(variable['buffer'])
def cleanup(self):
"""Clean up all variables."""
log_debug("Cleaning up all variables.")
for variable in self.get_variables():
shader_type = variable['shader_type']
if shader_type in ('attribute', 'texture'):
getattr(self, 'cleanup_%s' % shader_type)(variable['name'])
# clean up shaders
self.shader_manager.cleanup()
# Scene renderer
# --------------
class GLRenderer(object):
"""OpenGL renderer for a Scene.
This class takes a Scene object (dictionary) as an input, and
renders the scene. It provides methods to update the data in real-time.
"""
# Initialization
# --------------
def __init__(self, scene):
"""Initialize the renderer using the information on the scene.
Arguments:
* scene: a Scene dictionary with a `visuals` field containing
the list of visuals.
"""
self.scene = scene
self.viewport = (1., 1.)
self.visual_renderers = {}
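# Typical lifecycle sketch (comments only; building the scene dictionary is
# the responsibility of the host code):
#   r = GLRenderer(scene)
#   r.initialize()            # create the visual renderers, set GL options
#   r.resize(width, height)   # whenever the canvas is resized
#   r.paint()                 # at every frame
#   r.cleanup()               # before the GL context is destroyed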
def set_renderer_options(self):
"""Set the OpenGL options."""
options = self.scene.get('renderer_options', {})
# use vertex buffer object
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
# used for multisampling (antialiasing)
if options.get('antialiasing', None):
gl.glEnable(gl.GL_MULTISAMPLE)
# used for sprites
if options.get('sprites', True):
gl.glEnable(gl.GL_VERTEX_PROGRAM_POINT_SIZE)
gl.glEnable(gl.GL_POINT_SPRITE)
# enable transparency
if options.get('transparency', True):
gl.glEnable(gl.GL_BLEND)
blendfunc = options.get('transparency_blendfunc',
('SRC_ALPHA', 'ONE_MINUS_SRC_ALPHA')
# ('ONE_MINUS_DST_ALPHA', 'ONE')
)
blendfunc = [getattr(gl, 'GL_' + x) for x in blendfunc]
gl.glBlendFunc(*blendfunc)
# enable depth buffer, necessary for 3D rendering
if options.get('activate3D', None):
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glDepthMask(gl.GL_TRUE)
gl.glDepthFunc(gl.GL_LEQUAL)
gl.glDepthRange(0.0, 1.0)
# TODO: always enable??
gl.glClearDepth(1.0)
# Paint the background with the specified color (black by default)
background = options.get('background', (0, 0, 0, 0))
gl.glClearColor(*background)
def get_renderer_option(self, name):
return self.scene.get('renderer_options', {}).get(name, None)
# Visual methods
# --------------
def get_visuals(self):
"""Return all visuals defined in the scene."""
return self.scene.get('visuals', [])
def get_visual(self, name):
"""Return a visual by its name."""
visuals = [v for v in self.get_visuals() if v.get('name', '') == name]
if not visuals:
raise ValueError("The visual %s has not been found" % name)
return visuals[0]
# Data methods
# ------------
def set_data(self, name, **kwargs):
"""Load data for the specified visual. Uploading does not happen here
but in `update_all_variables` instead, since this needs to happen
after shader program binding in the paint method.
Arguments:
* name: the name of the visual as a string.
* **kwargs: the data to update as name:value pairs. name can be
any field of the visual, plus one of the following keywords:
* size: the size of the visual,
* primitive_type: the GL primitive type,
* constrain_ratio: whether to constrain the ratio of the visual,
* constrain_navigation: whether to constrain the navigation,
"""
# call set_data on the given visual renderer
if name in self.visual_renderers:
self.visual_renderers[name].set_data(**kwargs)
def copy_texture(self, name, tex1, tex2):
self.visual_renderers[name].copy_texture(tex1, tex2)
# Rendering methods
# -----------------
def initialize(self):
"""Initialize the renderer."""
# print the renderer information
for key, value in GLVersion.get_renderer_info().iteritems():
if key is not None and value is not None:
log_debug(key + ": " + value)
# initialize the renderer options using the options set in the Scene
self.set_renderer_options()
# create the VisualRenderer objects
self.visual_renderers = OrderedDict()
for visual in self.get_visuals():
name = visual['name']
self.visual_renderers[name] = GLVisualRenderer(self, visual)
# detect FBO
self.fbos = []
for name, vr in self.visual_renderers.iteritems():
fbos = vr.get_variables('framebuffer')
if fbos:
self.fbos.extend([fbo['buffer'] for fbo in fbos])
def clear(self):
"""Clear the scene."""
# clear the buffer (and the depth buffer if 3D is activated)
if self.scene.get('renderer_options', {}).get('activate3D', None):
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
else:
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
def paint(self):
"""Paint the scene."""
# non-FBO rendering
if not self.fbos:
self.clear()
for name, visual_renderer in self.visual_renderers.iteritems():
visual_renderer.paint()
# render each FBO separately, then the non-FBO visuals
else:
for fbo in self.fbos:
FrameBuffer.bind(fbo)
# fbo index
ifbo = self.fbos.index(fbo)
# clear
self.clear()
# paint all visual renderers
for name, visual_renderer in self.visual_renderers.iteritems():
if visual_renderer.framebuffer == ifbo:
# print ifbo, visual_renderer
visual_renderer.paint()
# finally, paint screen
FrameBuffer.unbind()
# render screen (non-FBO) visuals
self.clear()
for name, visual_renderer in self.visual_renderers.iteritems():
if visual_renderer.framebuffer == 'screen':
# print "screen", visual_renderer
visual_renderer.paint()
# print
def resize(self, width, height):
"""Resize the canvas and make appropriate changes to the scene."""
# paint within the whole window
gl.glViewport(0, 0, width, height)
# compute the constrained viewport
x = y = 1.0
if self.get_renderer_option('constrain_ratio'):
if height > 0:
aw = float(width) / height
ar = self.get_renderer_option('constrain_ratio')
if ar is True:
ar = 1.
if ar < aw:
x, y = aw / ar, 1.
else:
x, y = 1., ar / aw
self.viewport = x, y
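# Example (comment only): with the 'constrain_ratio' option set to 1 and a
# hypothetical 800x400 window, aw = 2.0 > ar = 1.0, so the viewport becomes
# (2.0, 1.0), i.e. the horizontal extent is enlarged to preserve the ratio.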
width = float(width)
height = float(height)
# update the viewport and window size for all visuals
for visual in self.get_visuals():
self.set_data(visual['name'],
viewport=self.viewport,
window_size=(width, height))
# Cleanup methods
# ---------------
def cleanup(self):
"""Clean up all allocated OpenGL objects."""
for name, renderer in self.visual_renderers.iteritems():
renderer.cleanup()
1626638
r"""
Local Frames
The class :class:`LocalFrame` implements local frames on vector bundles
(see :class:`~sage.manifolds.vector_bundle.TopologicalVectorBundle` or
:class:`~sage.manifolds.differentiable.vector_bundle.DifferentiableVectorBundle`).
For `k=0,1,\dots`, a *local frame* on a vector bundle `E \to M` of class `C^k`
and rank `n` is a local section `(e_1,\dots,e_n):U \to E^n` of class `C^k`
defined on some subset `U` of the base space `M`, such that `e(p)` is a basis of
the fiber `E_p` for any `p \in U`.
AUTHORS:
- <NAME> (2019): initial version
EXAMPLES:
Defining a global frame on a topological vector bundle of rank 3::
sage: M = Manifold(3, 'M', structure='top')
sage: E = M.vector_bundle(3, 'E')
sage: e = E.local_frame('e'); e
Local frame (E|_M, (e_0,e_1,e_2))
This frame is now the default frame of the corresponding section module and
saved in the vector bundle::
sage: e in E.frames()
True
sage: sec_module = E.section_module(); sec_module
Free module C^0(M;E) of sections on the 3-dimensional topological manifold M
with values in the real vector bundle E of rank 3
sage: sec_module.default_basis()
Local frame (E|_M, (e_0,e_1,e_2))
However, the default frame can be changed::
sage: sec_module.set_default_basis(e)
sage: sec_module.default_basis()
Local frame (E|_M, (e_0,e_1,e_2))
The elements of a local frame are local sections in the vector bundle::
sage: for vec in e:
....: print(vec)
Section e_0 on the 3-dimensional topological manifold M with values in the
real vector bundle E of rank 3
Section e_1 on the 3-dimensional topological manifold M with values in the
real vector bundle E of rank 3
Section e_2 on the 3-dimensional topological manifold M with values in the
real vector bundle E of rank 3
Each element of a vector frame can be accessed by its index::
sage: e[0]
Section e_0 on the 3-dimensional topological manifold M with values in the
real vector bundle E of rank 3
The slice operator ``:`` can be used to access more than one element::
sage: e[0:2]
(Section e_0 on the 3-dimensional topological manifold M with values in the
real vector bundle E of rank 3,
Section e_1 on the 3-dimensional topological manifold M with values in the
real vector bundle E of rank 3)
sage: e[:]
(Section e_0 on the 3-dimensional topological manifold M with values in the
real vector bundle E of rank 3,
Section e_1 on the 3-dimensional topological manifold M with values in the
real vector bundle E of rank 3,
Section e_2 on the 3-dimensional topological manifold M with values in the
real vector bundle E of rank 3)
The index range depends on the starting index defined on the manifold::
sage: M = Manifold(3, 'M', structure='top', start_index=1)
sage: c_xyz.<x,y,z> = M.chart()
sage: U = M.open_subset('U')
sage: c_xyz_U = c_xyz.restrict(U)
sage: E = M.vector_bundle(3, 'E')
sage: e = E.local_frame('e', domain=U); e
Local frame (E|_U, (e_1,e_2,e_3))
sage: [e[i] for i in M.irange()]
[Section e_1 on the Open subset U of the 3-dimensional topological manifold
M with values in the real vector bundle E of rank 3,
Section e_2 on the Open subset U of the 3-dimensional topological manifold
M with values in the real vector bundle E of rank 3,
Section e_3 on the Open subset U of the 3-dimensional topological manifold
M with values in the real vector bundle E of rank 3]
sage: e[1], e[2], e[3]
(Section e_1 on the Open subset U of the 3-dimensional topological manifold
M with values in the real vector bundle E of rank 3,
Section e_2 on the Open subset U of the 3-dimensional topological manifold
M with values in the real vector bundle E of rank 3,
Section e_3 on the Open subset U of the 3-dimensional topological manifold
M with values in the real vector bundle E of rank 3)
Let us check that the local sections ``e[i]`` are indeed the frame vectors
by looking at their components with respect to the frame `e`::
sage: e[1].comp(e)[:]
[1, 0, 0]
sage: e[2].comp(e)[:]
[0, 1, 0]
sage: e[3].comp(e)[:]
[0, 0, 1]
When a local frame is defined on a vector bundle, the dual coframe is
automatically created; by default, it bears the same name (here `e`)::
sage: E.coframes()
[Local coframe (E|_U, (e^1,e^2,e^3))]
sage: e_dual = E.coframes()[0] ; e_dual
Local coframe (E|_U, (e^1,e^2,e^3))
sage: e_dual is e.coframe()
True
Let us check that the coframe `(e^i)` is indeed the dual of the vector
frame `(e_i)`::
sage: e_dual[1](e[1]) # linear form e^1 applied to local section e_1
Scalar field e^1(e_1) on the Open subset U of the 3-dimensional topological
manifold M
sage: e_dual[1](e[1]).expr() # the explicit expression of e^1(e_1)
1
sage: e_dual[1](e[1]).expr(), e_dual[1](e[2]).expr(), e_dual[1](e[3]).expr()
(1, 0, 0)
sage: e_dual[2](e[1]).expr(), e_dual[2](e[2]).expr(), e_dual[2](e[3]).expr()
(0, 1, 0)
sage: e_dual[3](e[1]).expr(), e_dual[3](e[2]).expr(), e_dual[3](e[3]).expr()
(0, 0, 1)
Via bundle automorphisms, a new frame can be created from an existing one::
sage: sec_module_U = E.section_module(domain=U)
sage: change_frame = sec_module_U.automorphism()
sage: change_frame[:] = [[0,1,0],[0,0,1],[1,0,0]]
sage: f = e.new_frame(change_frame, 'f'); f
Local frame (E|_U, (f_1,f_2,f_3))
A copy of this automorphism and its inverse is now part of the vector bundle's
frame changes::
sage: E.change_of_frame(e, f)
Automorphism of the Free module C^0(U;E) of sections on the Open subset U of
the 3-dimensional topological manifold M with values in the real vector
bundle E of rank 3
sage: E.change_of_frame(e, f) == change_frame
True
sage: E.change_of_frame(f, e) == change_frame.inverse()
True
Let us check the components of `f` with respect to the frame `e`::
sage: f[1].comp(e)[:]
[0, 0, 1]
sage: f[2].comp(e)[:]
[1, 0, 0]
sage: f[3].comp(e)[:]
[0, 1, 0]
"""
#******************************************************************************
# Copyright (C) 2013-2018 <NAME> <<EMAIL>>
# Copyright (C) 2019 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.tensor.modules.free_module_basis import (FreeModuleBasis,
FreeModuleCoBasis)
from sage.tensor.modules.finite_rank_free_module import FiniteRankFreeModule
class LocalCoFrame(FreeModuleCoBasis):
r"""
Local coframe on a vector bundle.
A *local coframe* on a vector bundle `E \to M` of class `C^k` is a
local section `e^*: U \to E^{*n}` of class `C^k` on some subset `U` of the base
space `M`, such that `e^*(p)` is a basis of the fiber `E^*_p` of the dual
bundle for any `p \in U`.
INPUT:
- ``frame`` -- the local frame dual to the coframe
- ``symbol`` -- either a string, to be used as a common base for the
symbols of the linear forms constituting the coframe, or a tuple of
strings, representing the individual symbols of the linear forms
- ``latex_symbol`` -- (default: ``None``) either a string, to be used
as a common base for the LaTeX symbols of the linear forms constituting
the coframe, or a tuple of strings, representing the individual LaTeX
symbols of the linear forms; if ``None``, ``symbol`` is used in place of
``latex_symbol``
- ``indices`` -- (default: ``None``; used only if ``symbol`` is a single
string) tuple of strings representing the indices labelling the linear
forms of the coframe; if ``None``, the indices will be generated as
integers within the range declared on the coframe's domain
- ``latex_indices`` -- (default: ``None``) tuple of strings representing
the indices for the LaTeX symbols of the linear forms of the coframe; if
``None``, ``indices`` is used instead
EXAMPLES:
Local coframe on a topological vector bundle of rank 3::
sage: M = Manifold(3, 'M', structure='top', start_index=1)
sage: X.<x,y,z> = M.chart()
sage: E = M.vector_bundle(3, 'E')
sage: e = E.local_frame('e')
sage: from sage.manifolds.local_frame import LocalCoFrame
sage: f = LocalCoFrame(e, 'f'); f
Local coframe (E|_M, (f^1,f^2,f^3))
The local coframe can also be obtained by using the method
:meth:`~sage.tensor.modules.free_module_basis.FreeModuleBasis.dual_basis` or
:meth:`~sage.manifolds.local_frame.LocalFrame.coframe`::
sage: e_dual = e.dual_basis(); e_dual
Local coframe (E|_M, (e^1,e^2,e^3))
sage: e_dual is e.coframe()
True
sage: e_dual is f
False
sage: e_dual[:] == f[:]
True
sage: f[1].display(e)
f^1 = e^1
The linear forms constituting the coframe can be obtained via the operator ``[]``::
sage: f[1], f[2], f[3]
(Linear form f^1 on the Free module C^0(M;E) of sections on the
3-dimensional topological manifold M with values in the real vector
bundle E of rank 3,
Linear form f^2 on the Free module C^0(M;E) of sections on the
3-dimensional topological manifold M with values in the real vector
bundle E of rank 3,
Linear form f^3 on the Free module C^0(M;E) of sections on the
3-dimensional topological manifold M with values in the real vector
bundle E of rank 3)
Checking that `f` is the dual of `e`::
sage: f[1](e[1]).expr(), f[1](e[2]).expr(), f[1](e[3]).expr()
(1, 0, 0)
sage: f[2](e[1]).expr(), f[2](e[2]).expr(), f[2](e[3]).expr()
(0, 1, 0)
sage: f[3](e[1]).expr(), f[3](e[2]).expr(), f[3](e[3]).expr()
(0, 0, 1)
"""
def __init__(self, frame, symbol, latex_symbol=None, indices=None,
latex_indices=None):
r"""
Construct a local coframe, dual to a given local frame.
TESTS::
sage: M = Manifold(3, 'M')
sage: E = M.vector_bundle(2, 'E')
sage: e = E.local_frame('e')
sage: from sage.manifolds.local_frame import LocalCoFrame
sage: f = LocalCoFrame(e, 'f'); f
Local coframe (E|_M, (f^0,f^1))
sage: TestSuite(f).run()
"""
self._domain = frame.domain()
self._base_space = frame.base_space()
self._vbundle = frame.vector_bundle()
FreeModuleCoBasis.__init__(self, frame, symbol,
latex_symbol=latex_symbol, indices=indices,
latex_indices=latex_indices)
# The coframe is added to the vector bundle's set of coframes
self._vbundle._coframes.append(self)
def _repr_(self):
r"""
String representation of ``self``.
TESTS::
sage: M = Manifold(3, 'M', structure='top')
sage: E = M.vector_bundle(2, 'E')
sage: e = E.local_frame('e')
sage: f = e.coframe()
sage: f._repr_()
'Local coframe (E|_M, (e^0,e^1))'
sage: repr(f) # indirect doctest
'Local coframe (E|_M, (e^0,e^1))'
sage: f # indirect doctest
Local coframe (E|_M, (e^0,e^1))
"""
desc = "Local coframe " + self._name
return desc
def at(self, point):
r"""
Return the value of ``self`` at a given point on the base space, this
value being a basis of the fiber of the dual vector bundle at this point.
INPUT:
- ``point`` -- :class:`~sage.manifolds.point.ManifoldPoint`;
point `p` in the domain `U` of the coframe (denoted `f` hereafter)
OUTPUT:
- :class:`~sage.tensor.modules.free_module_basis.FreeModuleCoBasis`
representing the basis `f(p)` of the vector space `E^*_p`,
dual to the vector bundle fiber `E_p`
EXAMPLES:
Cobasis of a vector bundle fiber::
sage: M = Manifold(2, 'M', structure='top', start_index=1)
sage: X.<x,y> = M.chart()
sage: E = M.vector_bundle(2, 'E')
sage: e = E.local_frame('e')
sage: e_dual = e.coframe(); e_dual
Local coframe (E|_M, (e^1,e^2))
sage: p = M.point((-1,2), name='p')
sage: e_dual_p = e_dual.at(p) ; e_dual_p
Dual basis (e^1,e^2) on the Fiber of E at Point p on the
2-dimensional topological manifold M
sage: type(e_dual_p)
<class 'sage.tensor.modules.free_module_basis.FreeModuleCoBasis'>
sage: e_dual_p[1]
Linear form e^1 on the Fiber of E at Point p on the 2-dimensional
topological manifold M
sage: e_dual_p[2]
Linear form e^2 on the Fiber of E at Point p on the 2-dimensional
topological manifold M
sage: e_dual_p is e.at(p).dual_basis()
True
"""
return self._basis.at(point).dual_basis()
def set_name(self, symbol, latex_symbol=None, indices=None,
latex_indices=None, index_position='up',
include_domain=True):
r"""
Set (or change) the text name and LaTeX name of ``self``.
INPUT:
- ``symbol`` -- either a string, to be used as a common base for the
symbols of the linear forms constituting the coframe, or a list/tuple
of strings, representing the individual symbols of the linear forms
- ``latex_symbol`` -- (default: ``None``) either a string, to be used
as a common base for the LaTeX symbols of the linear forms
constituting the coframe, or a list/tuple of strings, representing the
individual LaTeX symbols of the linear forms; if ``None``, ``symbol``
is used in place of ``latex_symbol``
- ``indices`` -- (default: ``None``; used only if ``symbol`` is a
single string) tuple of strings representing the indices labelling
the linear forms of the coframe; if ``None``, the indices will be
generated as integers within the range declared on ``self``
- ``latex_indices`` -- (default: ``None``) tuple of strings
representing the indices for the LaTeX symbols of the linear forms;
if ``None``, ``indices`` is used instead
- ``index_position`` -- (default: ``'up'``) determines the position
of the indices labelling the linear forms of the coframe; can be
either ``'down'`` or ``'up'``
- ``include_domain`` -- (default: ``True``) boolean determining whether
the name of the domain is included in the beginning of the coframe
name
EXAMPLES::
sage: M = Manifold(3, 'M', structure='top')
sage: E = M.vector_bundle(2, 'E')
sage: e = E.local_frame('e').coframe(); e
Local coframe (E|_M, (e^0,e^1))
sage: e.set_name('f'); e
Local coframe (E|_M, (f^0,f^1))
sage: e.set_name('e', latex_symbol=r'\epsilon')
sage: latex(e)
\left(E|_{M}, \left(\epsilon^{0},\epsilon^{1}\right)\right)
sage: e.set_name('e', include_domain=False); e
Local coframe (e^0,e^1)
sage: e.set_name(['a', 'b'], latex_symbol=[r'\alpha', r'\beta']); e
Local coframe (E|_M, (a,b))
sage: latex(e)
\left(E|_{M}, \left(\alpha,\beta\right)\right)
sage: e.set_name('e', indices=['x','y'],
....: latex_indices=[r'\xi', r'\zeta']); e
Local coframe (E|_M, (e^x,e^y))
sage: latex(e)
\left(E|_{M}, \left(e^{\xi},e^{\zeta}\right)\right)
"""
super(LocalCoFrame, self).set_name(symbol, latex_symbol=latex_symbol,
indices=indices,
latex_indices=latex_indices,
index_position=index_position)
if include_domain:
# Redefinition of the name and the LaTeX name to include the domain
self._name = "({}|_{}, {})".format(self._vbundle._name,
self._domain._name, self._name)
self._latex_name = r"\left({}|_{{{}}}, {}\right)".format(
self._vbundle._latex_name,
self._domain._latex_name,
self._latex_name)
#******************************************************************************
class LocalFrame(FreeModuleBasis):
r"""
Local frame on a vector bundle.
A *local frame* on a vector bundle `E \to M` of class `C^k` is a local
section `(e_1,\dots,e_n):U \to E^n` of class `C^k` defined on some subset `U`
of the base space `M`, such that `e(p)` is a basis of the fiber `E_p` for
any `p \in U`.
For each instantiation of a local frame, a local coframe is automatically
created, as an instance of the class :class:`LocalCoFrame`. It is returned
by the method :meth:`coframe`.
INPUT:
- ``section_module`` -- free module of local sections over `U` in the given
vector bundle `E \to M`
- ``symbol`` -- either a string, to be used as a common base for the
symbols of the local sections constituting the local frame, or a tuple
of strings, representing the individual symbols of the local sections
- ``latex_symbol`` -- (default: ``None``) either a string, to be used
as a common base for the LaTeX symbols of the local sections constituting
the local frame, or a tuple of strings, representing the individual
LaTeX symbols of the local sections; if ``None``, ``symbol`` is used in
place of ``latex_symbol``
- ``indices`` -- (default: ``None``; used only if ``symbol`` is a single
string) tuple of strings representing the indices labelling the local
sections of the frame; if ``None``, the indices will be generated as
integers within the range declared on the local frame's domain
- ``latex_indices`` -- (default: ``None``) tuple of strings representing
the indices for the LaTeX symbols of the local sections; if
``None``, ``indices`` is used instead
- ``symbol_dual`` -- (default: ``None``) same as ``symbol`` but for the
dual coframe; if ``None``, ``symbol`` must be a string and is used
for the common base of the symbols of the elements of the dual coframe
- ``latex_symbol_dual`` -- (default: ``None``) same as ``latex_symbol``
but for the dual coframe
EXAMPLES:
Defining a local frame on a 3-dimensional vector bundle over a 3-dimensional
manifold::
sage: M = Manifold(3, 'M', start_index=1, structure='top')
sage: E = M.vector_bundle(3, 'E')
sage: e = E.local_frame('e'); e
Local frame (E|_M, (e_1,e_2,e_3))
sage: latex(e)
\left(E|_{M}, \left(e_{1},e_{2},e_{3}\right)\right)
The individual elements of the vector frame are accessed via square
brackets, with the possibility to invoke the slice operator '``:``' to
get more than a single element::
sage: e[2]
Section e_2 on the 3-dimensional topological manifold M with values in
the real vector bundle E of rank 3
sage: e[1:3]
(Section e_1 on the 3-dimensional topological manifold M with values in
the real vector bundle E of rank 3,
Section e_2 on the 3-dimensional topological manifold M with values in
the real vector bundle E of rank 3)
sage: e[:]
(Section e_1 on the 3-dimensional topological manifold M with values in
the real vector bundle E of rank 3,
Section e_2 on the 3-dimensional topological manifold M with values in
the real vector bundle E of rank 3,
Section e_3 on the 3-dimensional topological manifold M with values in
the real vector bundle E of rank 3)
The LaTeX symbol can be specified::
sage: eps = E.local_frame('eps', latex_symbol=r'\epsilon')
sage: latex(eps)
\left(E|_{M}, \left(\epsilon_{1},\epsilon_{2},\epsilon_{3}\right)\right)
By default, the elements of the local frame are labelled by integers
within the range specified at the manifold declaration. It is however
possible to fully customize the labels, via the argument ``indices``::
sage: u = E.local_frame('u', indices=('x', 'y', 'z')) ; u
Local frame (E|_M, (u_x,u_y,u_z))
sage: u[1]
Section u_x on the 3-dimensional topological manifold M with values in
the real vector bundle E of rank 3
sage: u.coframe()
Local coframe (E|_M, (u^x,u^y,u^z))
The LaTeX format of the indices can be adjusted::
sage: v = E.local_frame('v', indices=('a', 'b', 'c'),
....: latex_indices=(r'\alpha', r'\beta', r'\gamma'))
sage: v
Local frame (E|_M, (v_a,v_b,v_c))
sage: latex(v)
\left(E|_{M}, \left(v_{\alpha},v_{\beta},v_{\gamma}\right)\right)
sage: latex(v.coframe())
\left(E|_{M}, \left(v^{\alpha},v^{\beta},v^{\gamma}\right)\right)
The symbol of each element of the local frame can also be freely chosen,
by providing a tuple of symbols as the first argument of ``local_frame``;
it is then mandatory to specify as well some symbols for the dual coframe::
sage: h = E.local_frame(('a', 'b', 'c'), symbol_dual=('A', 'B', 'C')); h
Local frame (E|_M, (a,b,c))
sage: h[1]
Section a on the 3-dimensional topological manifold M with values in the
real vector bundle E of rank 3
sage: h.coframe()
Local coframe (E|_M, (A,B,C))
sage: h.coframe()[1]
Linear form A on the Free module C^0(M;E) of sections on the
3-dimensional topological manifold M with values in the real vector
bundle E of rank 3
Local frames are bases of free modules formed by local sections::
sage: N = Manifold(2, 'N', structure='top', start_index=1)
sage: X.<x,y> = N.chart()
sage: U = N.open_subset('U')
sage: F = N.vector_bundle(2, 'F')
sage: f = F.local_frame('f', domain=U)
sage: f.module()
Free module C^0(U;F) of sections on the Open subset U of the
2-dimensional topological manifold N with values in the real vector
bundle F of rank 2
sage: f.module().base_ring()
Algebra of scalar fields on the Open subset U of the 2-dimensional
topological manifold N
sage: f.module() is F.section_module(domain=f.domain())
True
sage: f in F.section_module(domain=U).bases()
True
The value of the local frame at a given point is a basis of the
corresponding fiber::
sage: X_U = X.restrict(U) # We need coordinates on the subset
sage: p = N((0,1), name='p') ; p
Point p on the 2-dimensional topological manifold N
sage: f.at(p)
Basis (f_1,f_2) on the Fiber of F at Point p on the 2-dimensional
topological manifold N
"""
# The following class attribute must be redefined by any derived class:
_cobasis_class = LocalCoFrame
@staticmethod
def __classcall_private__(cls, section_module, symbol,
latex_symbol=None, indices=None,
latex_indices=None, symbol_dual=None,
latex_symbol_dual=None):
"""
Transform input lists into tuples for the unique representation of
LocalFrame.
TESTS::
sage: M = Manifold(3, 'M')
sage: E = M.vector_bundle(2, 'E')
sage: C0 = E.section_module(force_free=True)
sage: from sage.manifolds.local_frame import LocalFrame
sage: e = LocalFrame(C0, ['a', 'b'], symbol_dual=['A', 'B']); e
Local frame (E|_M, (a,b))
sage: e.dual_basis()
Local coframe (E|_M, (A,B))
sage: e is LocalFrame(C0, ('a', 'b'), symbol_dual=('A', 'B'))
True
"""
if isinstance(symbol, list):
symbol = tuple(symbol)
if isinstance(latex_symbol, list):
latex_symbol = tuple(latex_symbol)
if isinstance(indices, list):
indices = tuple(indices)
if isinstance(latex_indices, list):
latex_indices = tuple(latex_indices)
if isinstance(symbol_dual, list):
symbol_dual = tuple(symbol_dual)
if isinstance(latex_symbol_dual, list):
latex_symbol_dual = tuple(latex_symbol_dual)
return super(LocalFrame, cls).__classcall__(cls, section_module,
symbol, latex_symbol=latex_symbol,
indices=indices,
latex_indices=latex_indices,
symbol_dual=symbol_dual,
latex_symbol_dual=latex_symbol_dual)
def __init__(self, section_module, symbol, latex_symbol=None, indices=None,
latex_indices=None, symbol_dual=None, latex_symbol_dual=None):
r"""
Construct a local frame on a vector bundle.
TESTS::
sage: M = Manifold(3, 'M')
sage: E = M.vector_bundle(2, 'E')
sage: C0 = E.section_module(force_free=True)
sage: from sage.manifolds.local_frame import LocalFrame
sage: e = LocalFrame(C0, 'e', latex_symbol=r'\epsilon'); e
Local frame (E|_M, (e_0,e_1))
sage: TestSuite(e).run()
"""
###
# Some sanity check:
if not isinstance(section_module, FiniteRankFreeModule):
raise ValueError("the {} has already been constructed as a "
"non-free module and therefore cannot have "
"a basis".format(section_module))
self._domain = section_module.domain()
self._base_space = section_module.base_space()
self._vbundle = section_module.vector_bundle()
FreeModuleBasis.__init__(self, section_module,
symbol, latex_symbol=latex_symbol,
indices=indices, latex_indices=latex_indices,
symbol_dual=symbol_dual,
latex_symbol_dual=latex_symbol_dual)
if self._vbundle._def_frame is None:
self._vbundle._def_frame = self
# The frame is added to the domain's modules of frames, as well as to
# all the superdomain's modules of frames; moreover the first defined
# frame is considered as the default one
for sd in self._domain._supersets:
if sd in self._vbundle._section_modules:
smodule = self._vbundle._section_modules[sd]
if smodule.default_frame() is None:
smodule.set_default_frame(self)
# Initialization of the zero element of the section module:
if not isinstance(smodule, FiniteRankFreeModule):
smodule(0)._add_comp_unsafe(self)
# (since new components are initialized to zero)
###
# Add this frame to the list of frames of the overlying vector bundle:
self._vbundle._add_local_frame(self)
self._coframe = self.dual_basis() # Shortcut for self._dual_basis
###
# Frame restrictions:
self._subframes = set([self]) # Set of frames which are just a
# restriction of self
self._superframes = set([self]) # Set of frames for which self is a
# restriction of
self._restrictions = {} # Key: subdomain of self._domain; value:
# restriction of self on this subdomain
###### Methods that must be redefined by derived classes of ######
###### FreeModuleBasis ######
def _repr_(self):
r"""
String representation of ``self``.
TESTS::
sage: M = Manifold(2, 'M', structure='top')
sage: E = M.vector_bundle(2, 'E')
sage: e = E.local_frame('e'); e
Local frame (E|_M, (e_0,e_1))
sage: repr(e) # indirect doctest
'Local frame (E|_M, (e_0,e_1))'
sage: e # indirect doctest
Local frame (E|_M, (e_0,e_1))
"""
desc = "Local frame " + self._name
return desc
def _new_instance(self, symbol, latex_symbol=None, indices=None,
latex_indices=None, symbol_dual=None,
latex_symbol_dual=None):
r"""
Construct a new local frame on the same section module as ``self``.
INPUT:
- ``symbol`` -- either a string, to be used as a common base for the
symbols of the sections constituting the local frame, or a
tuple of strings, representing the individual symbols of the local
sections
- ``latex_symbol`` -- (default: ``None``) either a string, to be used
as a common base for the LaTeX symbols of the local sections
constituting the local frame, or a tuple of strings, representing
the individual LaTeX symbols of the local sections; if ``None``,
``symbol`` is used in place of ``latex_symbol``
- ``indices`` -- (default: ``None``; used only if ``symbol`` is a
single string) tuple of strings representing the indices labelling
the local sections of the frame; if ``None``, the indices will be
generated as integers within the range declared on the local frame's
domain
- ``latex_indices`` -- (default: ``None``) tuple of strings
representing the indices for the LaTeX symbols of the local sections;
if ``None``, ``indices`` is used instead
- ``symbol_dual`` -- (default: ``None``) same as ``symbol`` but for the
dual coframe; if ``None``, ``symbol`` must be a string and is used
for the common base of the symbols of the elements of the dual
coframe
- ``latex_symbol_dual`` -- (default: ``None``) same as ``latex_symbol``
but for the dual coframe
OUTPUT:
- instance of :class:`LocalFrame`
TESTS::
sage: M = Manifold(2, 'M', structure='top')
sage: E = M.vector_bundle(2, 'E')
sage: e = E.local_frame('e')
sage: e._new_instance('f')
Local frame (E|_M, (f_0,f_1))
"""
return LocalFrame(self._fmodule, symbol, latex_symbol=latex_symbol,
indices=indices, latex_indices=latex_indices,
symbol_dual=symbol_dual,
latex_symbol_dual=latex_symbol_dual)
###### End of methods to be redefined by derived classes ######
def domain(self):
r"""
Return the domain on which ``self`` is defined.
EXAMPLES::
sage: M = Manifold(3, 'M', structure='top')
sage: U = M.open_subset('U')
sage: E = M.vector_bundle(2, 'E')
sage: e = E.local_frame('e', domain=U); e
Local frame (E|_U, (e_0,e_1))
sage: e.domain()
Open subset U of the 3-dimensional topological manifold M
"""
return self._domain
def base_space(self):
r"""
Return the base space on which the overlying vector bundle is defined.
EXAMPLES::
sage: M = Manifold(3, 'M', structure='top')
sage: U = M.open_subset('U')
sage: E = M.vector_bundle(2, 'E')
sage: e = E.local_frame('e', domain=U)
sage: e.base_space()
3-dimensional topological manifold M
"""
return self._base_space
def vector_bundle(self):
r"""
Return the vector bundle on which ``self`` is defined.
EXAMPLES::
sage: M = Manifold(3, 'M', structure='top')
sage: U = M.open_subset('U')
sage: E = M.vector_bundle(2, 'E')
sage: e = E.local_frame('e', domain=U)
sage: e.vector_bundle()
Topological real vector bundle E -> M of rank 2 over the base space
3-dimensional topological manifold M
sage: e.vector_bundle() is E
True
"""
return self._vbundle
def coframe(self):
r"""
Return the coframe of ``self``.
EXAMPLES::
sage: M = Manifold(2, 'M', structure='top')
sage: E = M.vector_bundle(2, 'E')
sage: e = E.local_frame('e'); e
Local frame (E|_M, (e_0,e_1))
sage: e.coframe()
Local coframe (E|_M, (e^0,e^1))
"""
return self._coframe
def new_frame(self, change_of_frame, symbol, latex_symbol=None,
indices=None, latex_indices=None, symbol_dual=None,
latex_symbol_dual=None):
r"""
Define a new local frame from ``self``.
The new local frame is defined from a vector bundle automorphism; its
module is the same as that of the current frame.
INPUT:
- ``change_of_frame`` --
:class:`~sage.tensor.modules.free_module_automorphism.FreeModuleAutomorphism`;
vector bundle automorphism `P` that relates
the current frame `(e_i)` to the new frame `(f_i)` according
to `f_i = P(e_i)`
- ``symbol`` -- either a string, to be used as a common base for the
symbols of the sections constituting the local frame, or a
list/tuple of strings, representing the individual symbols of the
sections
- ``latex_symbol`` -- (default: ``None``) either a string, to be used
as a common base for the LaTeX symbols of the sections
constituting the local frame, or a list/tuple of strings,
representing the individual LaTeX symbols of the sections;
if ``None``, ``symbol`` is used in place of ``latex_symbol``
- ``indices`` -- (default: ``None``; used only if ``symbol`` is a
single string) tuple of strings representing the indices labelling
the sections of the frame; if ``None``, the indices will be
generated as integers within the range declared on ``self``
- ``latex_indices`` -- (default: ``None``) tuple of strings
representing the indices for the LaTeX symbols of the sections;
if ``None``, ``indices`` is used instead
- ``symbol_dual`` -- (default: ``None``) same as ``symbol`` but for the
dual coframe; if ``None``, ``symbol`` must be a string and is used
for the common base of the symbols of the elements of the dual
coframe
- ``latex_symbol_dual`` -- (default: ``None``) same as ``latex_symbol``
but for the dual coframe
OUTPUT:
- the new frame `(f_i)`, as an instance of :class:`LocalFrame`
EXAMPLES:
Orthogonal transformation of a frame on the 2-dimensional trivial vector
bundle over the Euclidean plane::
sage: M = Manifold(2, 'R^2', structure='top', start_index=1)
sage: c_cart.<x,y> = M.chart()
sage: E = M.vector_bundle(2, 'E')
sage: e = E.local_frame('e'); e
Local frame (E|_R^2, (e_1,e_2))
sage: orth = E.section_module().automorphism()
sage: orth[:] = [[sqrt(3)/2, -1/2], [1/2, sqrt(3)/2]]
sage: f = e.new_frame(orth, 'f')
sage: f[1][:]
[1/2*sqrt(3), 1/2]
sage: f[2][:]
[-1/2, 1/2*sqrt(3)]
sage: a = E.change_of_frame(e,f)
sage: a[:]
[1/2*sqrt(3) -1/2]
[ 1/2 1/2*sqrt(3)]
sage: a == orth
True
sage: a is orth
False
sage: a._components # random (dictionary output)
{Local frame (E|_D_0, (e_1,e_2)): 2-indices components w.r.t.
Local frame (E|_D_0, (e_1,e_2)),
Local frame (E|_D_0, (f_1,f_2)): 2-indices components w.r.t.
Local frame (E|_D_0, (f_1,f_2))}
sage: a.comp(f)[:]
[1/2*sqrt(3) -1/2]
[ 1/2 1/2*sqrt(3)]
sage: a1 = E.change_of_frame(f,e)
sage: a1[:]
[1/2*sqrt(3) 1/2]
[ -1/2 1/2*sqrt(3)]
sage: a1 == orth.inverse()
True
sage: a1 is orth.inverse()
False
sage: e[1].comp(f)[:]
[1/2*sqrt(3), -1/2]
sage: e[2].comp(f)[:]
[1/2, 1/2*sqrt(3)]
"""
the_new_frame = self.new_basis(change_of_frame, symbol,
latex_symbol=latex_symbol,
indices=indices,
latex_indices=latex_indices,
symbol_dual=symbol_dual,
latex_symbol_dual=latex_symbol_dual)
self._vbundle._frame_changes[(self, the_new_frame)] = \
self._fmodule._basis_changes[(self, the_new_frame)]
self._vbundle._frame_changes[(the_new_frame, self)] = \
self._fmodule._basis_changes[(the_new_frame, self)]
return the_new_frame
def restrict(self, subdomain):
r"""
Return the restriction of ``self`` to some open subset of its domain.
If the restriction has not been defined yet, it is constructed here.
INPUT:
- ``subdomain`` -- open subset `V` of the current frame domain `U`
OUTPUT:
- the restriction of the current frame to `V` as a :class:`LocalFrame`
EXAMPLES:
Restriction of a frame defined on `\RR^2` to the unit disk::
sage: M = Manifold(2, 'R^2', structure='top', start_index=1)
sage: c_cart.<x,y> = M.chart() # Cartesian coordinates on R^2
sage: E = M.vector_bundle(2, 'E')
sage: e = E.local_frame('e'); e
Local frame (E|_R^2, (e_1,e_2))
sage: a = E.section_module().automorphism()
sage: a[:] = [[1-y^2,0], [1+x^2, 2]]
sage: f = e.new_frame(a, 'f'); f
Local frame (E|_R^2, (f_1,f_2))
sage: U = M.open_subset('U', coord_def={c_cart: x^2+y^2<1})
sage: e_U = e.restrict(U); e_U
Local frame (E|_U, (e_1,e_2))
sage: f_U = f.restrict(U) ; f_U
Local frame (E|_U, (f_1,f_2))
The vectors of the restriction have the same symbols as those of the
original frame::
sage: f_U[1].display()
f_1 = (-y^2 + 1) e_1 + (x^2 + 1) e_2
sage: f_U[2].display()
f_2 = 2 e_2
Actually, the components are the restrictions of the original frame
vectors::
sage: f_U[1] is f[1].restrict(U)
True
sage: f_U[2] is f[2].restrict(U)
True
"""
if subdomain == self._domain:
return self
if subdomain not in self._restrictions:
if not subdomain.is_subset(self._domain):
raise ValueError("the provided domain is not a subdomain of " +
"the current frame's domain")
# First one tries to get the restriction from a tighter domain:
for dom, rst in self._restrictions.items():
if subdomain.is_subset(dom) and subdomain in rst._restrictions:
res = rst._restrictions[subdomain]
self._restrictions[subdomain] = res
res._superframes.update(self._superframes)
for sframe2 in self._superframes:
sframe2._subframes.add(res)
return self._restrictions[subdomain]
# Secondly one tries to get the restriction from one previously
# defined on a larger domain:
for sframe in self._superframes:
if subdomain in sframe._restrictions:
res = sframe._restrictions[subdomain]
self._restrictions[subdomain] = res
res._superframes.update(self._superframes)
for sframe2 in self._superframes:
sframe2._subframes.add(res)
return self._restrictions[subdomain]
# If this point is reached, the restriction has to be created
# from scratch
resmodule = self._vbundle.section_module(domain=subdomain,
force_free=True)
res = LocalFrame(resmodule,
self._symbol, latex_symbol=self._latex_symbol,
indices=self._indices,
latex_indices=self._latex_indices,
symbol_dual=self._symbol_dual,
latex_symbol_dual=self._latex_symbol_dual)
new_vectors = list()
for i in self._fmodule.irange():
vrest = self[i].restrict(subdomain)
for j in self._fmodule.irange():
vrest.add_comp(res)[j] = 0
vrest.add_comp(res)[i] = 1
new_vectors.append(vrest)
res._vec = tuple(new_vectors)
# Update of superframes and subframes:
for sframe in self._subframes:
if subdomain.is_subset(sframe.domain()):
res._superframes.update(sframe._superframes)
for sframe in res._superframes:
sframe._subframes.add(res)
sframe._restrictions[subdomain] = res # includes sframe = self
for dom, rst in self._restrictions.items():
if dom.is_subset(subdomain):
res._restrictions.update(rst._restrictions)
res._subframes.update(rst._subframes)
rst._superframes.update(res._superframes)
return self._restrictions[subdomain]
def at(self, point):
r"""
Return the value of ``self`` at a given point, this value being
a basis of the vector bundle fiber at the point.
INPUT:
- ``point`` -- :class:`~sage.manifolds.point.ManifoldPoint`; point
`p` in the domain `U` of the local frame (denoted `e` hereafter)
OUTPUT:
- :class:`~sage.tensor.modules.free_module_basis.FreeModuleBasis`
representing the basis `e(p)` of the vector bundle fiber
`E_p`
EXAMPLES:
Basis of a fiber of a trivial vector bundle::
sage: M = Manifold(2, 'M', structure='top')
sage: X.<x,y> = M.chart()
sage: E = M.vector_bundle(2, 'E')
sage: e = E.local_frame('e'); e
Local frame (E|_M, (e_0,e_1))
sage: p = M.point((-1,2), name='p')
sage: ep = e.at(p) ; ep
Basis (e_0,e_1) on the Fiber of E at Point p on the 2-dimensional
topological manifold M
sage: type(ep)
<class 'sage.tensor.modules.free_module_basis.FreeModuleBasis'>
sage: ep[0]
Vector e_0 in the fiber of E at Point p on the 2-dimensional
topological manifold M
sage: ep[1]
Vector e_1 in the fiber of E at Point p on the 2-dimensional
topological manifold M
        Note that the symbols used to denote the vectors are the same as those
        for the local sections of the frame. At this stage, ``ep`` is the unique
        basis on the fiber at ``p``::
sage: Ep = E.fiber(p)
sage: Ep.bases()
[Basis (e_0,e_1) on the Fiber of E at Point p on the 2-dimensional
topological manifold M]
Let us consider another local frame::
sage: aut = E.section_module().automorphism()
sage: aut[:] = [[1+y^2, 0], [0, 2]]
sage: f = e.new_frame(aut, 'f') ; f
Local frame (E|_M, (f_0,f_1))
sage: fp = f.at(p) ; fp
Basis (f_0,f_1) on the Fiber of E at Point p on the 2-dimensional
topological manifold M
There are now two bases on the fiber::
sage: Ep.bases()
[Basis (e_0,e_1) on the Fiber of E at Point p on the 2-dimensional
topological manifold M,
Basis (f_0,f_1) on the Fiber of E at Point p on the 2-dimensional
topological manifold M]
        Moreover, the changes of bases in the fiber have been
computed from the known relation between the frames ``e`` and
``f`` (via the automorphism ``aut`` defined above)::
sage: Ep.change_of_basis(ep, fp)
Automorphism of the Fiber of E at Point p on the 2-dimensional
topological manifold M
sage: Ep.change_of_basis(ep, fp).display()
5 e_0⊗e^0 + 2 e_1⊗e^1
sage: Ep.change_of_basis(fp, ep)
Automorphism of the Fiber of E at Point p on the 2-dimensional
topological manifold M
sage: Ep.change_of_basis(fp, ep).display()
1/5 e_0⊗e^0 + 1/2 e_1⊗e^1
The dual bases::
sage: e.coframe()
Local coframe (E|_M, (e^0,e^1))
sage: ep.dual_basis()
Dual basis (e^0,e^1) on the Fiber of E at Point p on the
2-dimensional topological manifold M
sage: ep.dual_basis() is e.coframe().at(p)
True
sage: f.coframe()
Local coframe (E|_M, (f^0,f^1))
sage: fp.dual_basis()
Dual basis (f^0,f^1) on the Fiber of E at Point p on the
2-dimensional topological manifold M
sage: fp.dual_basis() is f.coframe().at(p)
True
"""
# Determination of the vector bundle fiber:
if point not in self._domain:
raise ValueError("the {} is not a point in the ".format(point) +
"domain of {}".format(self))
vbf = self._vbundle.fiber(point)
# If the basis has already been constructed, it is simply returned:
vbf_frame_bases = vbf._frame_bases
if self in vbf_frame_bases:
return vbf_frame_bases[self]
for frame in vbf_frame_bases:
if self in frame._subframes or self in frame._superframes:
return vbf_frame_bases[frame]
# If this point is reached, the basis has to be constructed from
# scratch.
# The names of the basis vectors set to those of the frame sections:
basis = vbf.basis(self._symbol, latex_symbol=self._latex_symbol,
indices=self._indices,
latex_indices=self._latex_indices,
symbol_dual=self._symbol_dual,
latex_symbol_dual=self._latex_symbol_dual)
vbf_frame_bases[self] = basis
# Update of the change of bases in the fiber:
for frame_pair, automorph in self._vbundle._frame_changes.items():
frame1 = frame_pair[0]
frame2 = frame_pair[1]
if frame1 is self:
fr2 = None
for frame in vbf_frame_bases:
if frame2 in frame._subframes:
fr2 = frame
break
if fr2 is not None:
basis1 = basis
basis2 = vbf_frame_bases[fr2]
auto = vbf.automorphism()
for frame, comp in automorph._components.items():
bas = None
if frame is frame1:
bas = basis1
if frame is frame2:
bas = basis2
if bas is not None:
cauto = auto.add_comp(bas)
for ind, val in comp._comp.items():
cauto._comp[ind] = val(point)
vbf._basis_changes[(basis1, basis2)] = auto
if frame2 is self:
fr1 = None
for frame in vbf_frame_bases:
if frame1 in frame._subframes:
fr1 = frame
break
if fr1 is not None:
basis1 = vbf_frame_bases[fr1]
basis2 = basis
auto = vbf.automorphism()
for frame, comp in automorph._components.items():
bas = None
if frame is frame1:
bas = basis1
if frame is frame2:
bas = basis2
if bas is not None:
cauto = auto.add_comp(bas)
for ind, val in comp._comp.items():
cauto._comp[ind] = val(point)
vbf._basis_changes[(basis1, basis2)] = auto
return basis
def set_name(self, symbol, latex_symbol=None, indices=None,
latex_indices=None, index_position='down',
include_domain=True):
r"""
Set (or change) the text name and LaTeX name of ``self``.
INPUT:
- ``symbol`` -- either a string, to be used as a common base for the
symbols of the local sections constituting the local frame, or a
list/tuple of strings, representing the individual symbols of the
local sections
- ``latex_symbol`` -- (default: ``None``) either a string, to be used
as a common base for the LaTeX symbols of the local sections
constituting the local frame, or a list/tuple of strings,
representing the individual LaTeX symbols of the local sections;
if ``None``, ``symbol`` is used in place of ``latex_symbol``
- ``indices`` -- (default: ``None``; used only if ``symbol`` is a
single string) tuple of strings representing the indices labelling
the local sections of the frame; if ``None``, the indices will be
generated as integers within the range declared on ``self``
- ``latex_indices`` -- (default: ``None``) tuple of strings
representing the indices for the LaTeX symbols of the local sections;
if ``None``, ``indices`` is used instead
- ``index_position`` -- (default: ``'down'``) determines the position
of the indices labelling the local sections of the frame; can be
either ``'down'`` or ``'up'``
- ``include_domain`` -- (default: ``True``) boolean determining whether
the name of the domain is included in the beginning of the vector
frame name
EXAMPLES::
sage: M = Manifold(3, 'M', structure='top')
sage: E = M.vector_bundle(2, 'E')
sage: e = E.local_frame('e'); e
Local frame (E|_M, (e_0,e_1))
sage: e.set_name('f'); e
Local frame (E|_M, (f_0,f_1))
sage: e.set_name('e', include_domain=False); e
Local frame (e_0,e_1)
sage: e.set_name(['a', 'b']); e
Local frame (E|_M, (a,b))
sage: e.set_name('e', indices=['x', 'y']); e
Local frame (E|_M, (e_x,e_y))
sage: e.set_name('e', latex_symbol=r'\epsilon')
sage: latex(e)
\left(E|_{M}, \left(\epsilon_{0},\epsilon_{1}\right)\right)
sage: e.set_name('e', latex_symbol=[r'\alpha', r'\beta'])
sage: latex(e)
\left(E|_{M}, \left(\alpha,\beta\right)\right)
sage: e.set_name('e', latex_symbol='E',
....: latex_indices=[r'\alpha', r'\beta'])
sage: latex(e)
\left(E|_{M}, \left(E_{\alpha},E_{\beta}\right)\right)
"""
super(LocalFrame, self).set_name(symbol, latex_symbol=latex_symbol,
indices=indices,
latex_indices=latex_indices,
index_position=index_position)
if include_domain:
# Redefinition of the name and the LaTeX name to include the domain
self._name = "({}|_{}, {})".format(self._vbundle._name,
self._domain._name, self._name)
self._latex_name = r"\left({}|_{{{}}}, {}\right)".format(
self._vbundle._latex_name,
self._domain._latex_name,
self._latex_name)
#******************************************************************************
class TrivializationCoFrame(LocalCoFrame):
r"""
Trivialization coframe on a vector bundle.
A *trivialization coframe* is the coframe of the trivialization frame
induced by a trivialization (see: :class:`~sage.manifolds.local_frame.TrivializationFrame`).
    More precisely, a *trivialization coframe* on a vector bundle `E \to M` of
class `C^k` and rank `n` over the topological field `K` and over a
topological manifold `M` is a local coframe induced by a local
    trivialization `\varphi:E|_U \to U \times K^n` of the domain `U \subset M`.
Namely, the local dual sections
.. MATH::
\varphi^*e^i := \varphi(\;\cdot\;, e^i)
    on `U` induce a local coframe `(\varphi^*e^1, \dots, \varphi^*e^n)`, where
`(e^1, \dots, e^n)` is the dual of the standard basis of `K^n`.
INPUT:
- ``triv_frame`` -- trivialization frame dual to the trivialization coframe
- ``symbol`` -- either a string, to be used as a common base for the
symbols of the dual sections constituting the coframe, or a tuple of
strings, representing the individual symbols of the dual sections
- ``latex_symbol`` -- (default: ``None``) either a string, to be used
as a common base for the LaTeX symbols of the dual sections constituting
the coframe, or a tuple of strings, representing the individual LaTeX
symbols of the dual sections; if ``None``, ``symbol`` is used in place of
``latex_symbol``
- ``indices`` -- (default: ``None``; used only if ``symbol`` is a single
string) tuple of strings representing the indices labelling the dual
sections of the coframe; if ``None``, the indices will be generated as
integers within the range declared on the local frame's domain
- ``latex_indices`` -- (default: ``None``) tuple of strings representing
the indices for the LaTeX symbols of the dual sections of the coframe; if
``None``, ``indices`` is used instead
EXAMPLES:
Trivialization coframe on a trivial vector bundle of rank 3::
sage: M = Manifold(3, 'M', start_index=1, structure='top')
sage: X.<x,y,z> = M.chart()
sage: E = M.vector_bundle(3, 'E')
sage: phi = E.trivialization('phi'); phi
Trivialization (phi, E|_M)
sage: E.frames()
[Trivialization frame (E|_M, ((phi^*e_1),(phi^*e_2),(phi^*e_3)))]
sage: E.coframes()
[Trivialization coframe (E|_M, ((phi^*e^1),(phi^*e^2),(phi^*e^3)))]
sage: f = E.coframes()[0] ; f
Trivialization coframe (E|_M, ((phi^*e^1),(phi^*e^2),(phi^*e^3)))
The linear forms composing the coframe are obtained via the operator
``[]``::
sage: f[1]
Linear form (phi^*e^1) on the Free module C^0(M;E) of sections on the
3-dimensional topological manifold M with values in the real vector
bundle E of rank 3
sage: f[2]
Linear form (phi^*e^2) on the Free module C^0(M;E) of sections on the
3-dimensional topological manifold M with values in the real vector
bundle E of rank 3
sage: f[3]
Linear form (phi^*e^3) on the Free module C^0(M;E) of sections on the
3-dimensional topological manifold M with values in the real vector
bundle E of rank 3
sage: f[1][:]
[1, 0, 0]
sage: f[2][:]
[0, 1, 0]
sage: f[3][:]
[0, 0, 1]
The coframe is the dual of the trivialization frame::
sage: e = phi.frame() ; e
Trivialization frame (E|_M, ((phi^*e_1),(phi^*e_2),(phi^*e_3)))
sage: f[1](e[1]).expr(), f[1](e[2]).expr(), f[1](e[3]).expr()
(1, 0, 0)
sage: f[2](e[1]).expr(), f[2](e[2]).expr(), f[2](e[3]).expr()
(0, 1, 0)
sage: f[3](e[1]).expr(), f[3](e[2]).expr(), f[3](e[3]).expr()
(0, 0, 1)
"""
def __init__(self, triv_frame, symbol, latex_symbol=None,
indices=None, latex_indices=None):
r"""
Construct a local coframe from a local trivialization.
TESTS::
sage: M = Manifold(2, 'M', structure='top')
sage: E = M.vector_bundle(2, 'E')
sage: phi = E.trivialization('phi')
sage: from sage.manifolds.local_frame import TrivializationCoFrame
sage: f = TrivializationCoFrame(phi.frame(), 'omega'); f
Trivialization coframe (E|_M, (omega^0,omega^1))
sage: TestSuite(f).run()
"""
if not isinstance(triv_frame, TrivializationFrame):
raise TypeError("the first argument must be a local trivialization "
"frame")
LocalCoFrame.__init__(self, triv_frame, symbol,
latex_symbol=latex_symbol, indices=indices,
latex_indices=latex_indices)
self._trivialization = triv_frame._trivialization
def _repr_(self):
r"""
String representation of ``self``.
TESTS::
sage: M = Manifold(3, 'M')
sage: E = M.vector_bundle(2, 'E')
sage: phi = E.trivialization('phi')
sage: e = phi.frame().coframe()
sage: e._repr_()
'Trivialization coframe (E|_M, ((phi^*e^1),(phi^*e^2)))'
sage: repr(e) # indirect doctest
'Trivialization coframe (E|_M, ((phi^*e^1),(phi^*e^2)))'
sage: e # indirect doctest
Trivialization coframe (E|_M, ((phi^*e^1),(phi^*e^2)))
"""
return "Trivialization coframe " + self._name
#******************************************************************************
class TrivializationFrame(LocalFrame):
r"""
Trivialization frame on a topological vector bundle.
A *trivialization frame* on a topological vector bundle `E \to M` of rank
`n` over the topological field `K` and over a topological manifold `M` is a
local frame induced by a local trivialization `\varphi:E|_U \to U \times K^n`
    of the domain `U \subset M`. More precisely, the local sections
.. MATH::
\varphi^*e_i := \varphi(\;\cdot\;, e_i)
on `U` induce a local frame `(\varphi^*e_1, \dots, \varphi^*e_n)`, where
`(e_1, \dots, e_n)` is the standard basis of `K^n`.
INPUT:
- ``trivialization`` -- the trivialization defined on the vector bundle
EXAMPLES::
sage: M = Manifold(3, 'M')
sage: U = M.open_subset('U')
sage: E = M.vector_bundle(2, 'E')
sage: phi_U = E.trivialization('phi_U', domain=U)
sage: phi_U.frame()
Trivialization frame (E|_U, ((phi_U^*e_1),(phi_U^*e_2)))
sage: latex(phi_U.frame())
\left(E|_{U}, \left(\left(phi_U^* e_{ 1 }\right),\left(phi_U^* e_{ 2 }\right)\right)\right)
"""
# The following class attribute must be redefined by any derived class:
_cobasis_class = TrivializationCoFrame
def __init__(self, trivialization):
r"""
Construct a trivialization frame.
TESTS::
sage: M = Manifold(3, 'M')
sage: E = M.vector_bundle(2, 'E')
sage: phi = E.trivialization('phi')
sage: e = phi.frame()
sage: TestSuite(e).run()
"""
from sage.misc.latex import latex
from .trivialization import Trivialization
if not isinstance(trivialization, Trivialization):
raise TypeError("the first argument must be a trivialization")
###
# Some useful variables:
triv = trivialization
domain = triv.domain()
vbundle = triv.vector_bundle()
###
# Some sanity check:
smodule = vbundle._section_modules.get(domain)
if smodule and not isinstance(smodule, FiniteRankFreeModule):
raise ValueError("the {} has already been constructed as a "
"non-free module and therefore cannot have "
"a basis".format(smodule))
###
# Set trivialization:
self._trivialization = triv
###
# Define trivialization names
rank = vbundle.rank()
symbol = tuple("(" + triv._name + "^*" + "e_" + str(i) + ")"
for i in range(1, rank + 1))
symbol_dual = tuple("(" + triv._name + "^*" + "e^" + str(i) + ")"
for i in range(1, rank + 1))
latex_symbol = tuple(r'\left(' + triv._latex_name + r'^* e_{' +
latex(i) + r'}\right)'
for i in range(1, rank + 1))
latex_symbol_dual = tuple(r'\left(' + triv._latex_name + r'^* e^{' +
latex(i) + r'}\right)'
for i in range(1, rank + 1))
LocalFrame.__init__(self,
vbundle.section_module(domain=domain, force_free=True),
symbol=symbol, latex_symbol=latex_symbol,
symbol_dual=symbol_dual,
latex_symbol_dual=latex_symbol_dual)
def _repr_(self):
r"""
String representation of ``self``.
TESTS::
sage: M = Manifold(3, 'M')
sage: E = M.vector_bundle(2, 'E')
sage: phi = E.trivialization('phi')
sage: e = phi.frame()
sage: e._repr_()
'Trivialization frame (E|_M, ((phi^*e_1),(phi^*e_2)))'
sage: repr(e) # indirect doctest
'Trivialization frame (E|_M, ((phi^*e_1),(phi^*e_2)))'
sage: e # indirect doctest
Trivialization frame (E|_M, ((phi^*e_1),(phi^*e_2)))
"""
return "Trivialization frame " + self._name
def trivialization(self):
r"""
Return the underlying trivialization of ``self``.
EXAMPLES::
sage: M = Manifold(3, 'M')
sage: U = M.open_subset('U')
sage: E = M.vector_bundle(2, 'E')
sage: phi_U = E.trivialization('phi_U', domain=U)
sage: e = phi_U.frame()
sage: e.trivialization()
Trivialization (phi_U, E|_U)
sage: e.trivialization() is phi_U
True
"""
return self._trivialization
|
1626722
|
import sys, os, time, json, random
from PIL import Image, ImageDraw, ImageFont, ImageFilter
sys.path.append(os.getcwd())
class Structure(object):
def __init__(self):
self.first_title = None
self.secord_title = None
self.font_path = "cover_generator/font"
with open("cover_generator/layout.json", 'r') as f0:
self.layout_config = json.load(f0)
self.tb = Image.open("cover_generator/" + self.layout_config["transparent_background"])
        # "4+1" models: 0 traditional, 1 simplicity, 2 technology, 3 simplify
        # "3-center" models: 4 humorous, 5 classical, 6 vertical, 7 focus, 8 fresh
self.structure_map = {
0: self.traditional,
1: self.simplicity,
2: self.technology,
3: self.simplify,
4: self.humorous,
5: self.classical,
6: self.vertical,
7: self.focus,
8: self.fresh
}
self.x_model_map = {
0: "○",
1: "↖",
2: "↙",
3: "↗",
4: "↘",
}
self.l_model_map = {
0: "↑",
1: "○",
2: "↓",
}
def model_init(
self,
config
):
        # Load the template as a drawable object
draw = ImageDraw.Draw(self.tb)
        # Pick a random font
current_font = self.random_font()
first_title_font = ImageFont.truetype("cover_generator/font/" + current_font, size=config["text"][0]["size"])
secord_title_font = ImageFont.truetype("cover_generator/font/" + current_font, size=config["text"][1]["size"])
        # Measure the rendered sizes of both titles for relative positioning
first_title_size = draw.textsize(self.first_title, font=first_title_font)
secord_title_size = draw.textsize(self.secord_title, font=secord_title_font)
return draw, first_title_font, secord_title_font, first_title_size, secord_title_size
def l_model(
self,
draw,
config,
first_title_location_x,
first_title_location_y,
component_location,
secord_title_location_x,
secord_title_location_y,
first_title_font,
secord_title_font,
half_max_size,
helf_height
):
        # Pick a random layout position
current_location = self.l_location()
first_title_position_x = config["polarization"][current_location]["position"][0] - half_max_size + first_title_location_x
first_title_position_y = config["polarization"][current_location]["position"][1] - helf_height + first_title_location_y
secord_title_position_x = config["polarization"][current_location]["position"][0] - half_max_size + secord_title_location_x
secord_title_position_y = config["polarization"][current_location]["position"][1] - helf_height + secord_title_location_y
component_position = []
for ikey in component_location:
component_position_x = config["polarization"][current_location]["position"][0] - half_max_size + ikey[0]
component_position_y = config["polarization"][current_location]["position"][1] - helf_height + ikey[1]
component_position.append(
[component_position_x, component_position_y, ikey[2]]
)
self.title_render(
draw,
config,
component_position,
first_title_position_x,
first_title_position_y,
secord_title_position_x,
secord_title_position_y,
first_title_font,
secord_title_font
)
def x_model(self, config):
# init
draw, first_title_font, secord_title_font, first_title_size, secord_title_size = self.model_init(config)
        # Pick a random layout position
current_location = self.x_location()
        # A positive offset means the top line is longer than the bottom; a negative one means the bottom is longer
max_size = max(first_title_size[0], secord_title_size[0])
        # Everything below is a relative position
line_space = first_title_size[1] * config["other"]["space"]
        # Anchor the left component and position the right component relative to it
if config["polarization"][current_location]["option"] == 0:
            # Work downwards step by step to determine the absolute positions
component_01_position_x = config["polarization"][current_location]["position"][0]
component_01_position_y = config["polarization"][current_location]["position"][1]
first_title_position_x = component_01_position_x + config["component"][0]["offset"][0]
first_title_position_y = component_01_position_y + config["component"][0]["offset"][1]
secord_title_position_x = component_01_position_x + config["component"][0]["offset"][0]
secord_title_position_y = component_01_position_y + line_space + config["component"][0]["offset"][1]
component_02_position_x = component_01_position_x + max_size + config["component"][1]["offset"][0] + config["component"][0]["offset"][0]
component_02_position_y = component_01_position_y + line_space + config["component"][1]["offset"][1] + config["component"][0]["offset"][1]
component_position = [
[component_02_position_x, component_02_position_y, 1],
[component_01_position_x, component_01_position_y, 0]
]
self.title_render(
draw,
config,
component_position,
first_title_position_x,
first_title_position_y,
secord_title_position_x,
secord_title_position_y,
first_title_font,
secord_title_font
)
        # Anchor the right component and position the left component relative to it
elif config["polarization"][current_location]["option"] == 1:
component_02 = Image.open("cover_generator/components/" + config["component"][1]["path"])
component_02_position_x = config["polarization"][current_location]["position"][0] - component_02.width
component_02_position_y = config["polarization"][current_location]["position"][1] - component_02.height
secord_title_position_x = config["polarization"][current_location]["position"][0] - config["component"][1]["offset"][0] - secord_title_size[0] - component_02.width
secord_title_position_y = config["polarization"][current_location]["position"][1] - config["component"][1]["offset"][1] - component_02.height
first_title_position_x = config["polarization"][current_location]["position"][0] - config["component"][1]["offset"][0] - first_title_size[0] - component_02.width
first_title_position_y = config["polarization"][current_location]["position"][1] - config["component"][1]["offset"][1] - line_space - component_02.height
component_01_position_x = component_02_position_x - max_size - config["component"][0]["offset"][0]
component_01_position_y = component_02_position_y - line_space - config["component"][0]["offset"][1]
component_position = [
[component_02_position_x, component_02_position_y, 1],
[component_01_position_x, component_01_position_y, 0]
]
self.title_render(
draw,
config,
component_position,
first_title_position_x,
first_title_position_y,
secord_title_position_x,
secord_title_position_y,
first_title_font,
secord_title_font
)
        # Anchor the center and position the left and right components around it
else:
first_title_position_x = config["polarization"][current_location]["position"][0] - first_title_size[0] / 2
first_title_position_y = config["polarization"][current_location]["position"][1] - (first_title_size[1] + secord_title_size[1] + line_space) / 2
secord_title_position_x = config["polarization"][current_location]["position"][0] - secord_title_size[0] / 2
secord_title_position_y = first_title_position_y + line_space
component_01_position_x = config["polarization"][current_location]["position"][0] - max_size / 2 - config["component"][0]["offset"][0]
component_01_position_y = first_title_position_y - config["component"][0]["offset"][1]
component_02_position_x = config["polarization"][current_location]["position"][0] + max_size / 2 + config["component"][1]["offset"][0]
component_02_position_y = secord_title_position_y + config["component"][1]["offset"][1]
component_position = [
[component_02_position_x, component_02_position_y, 1],
[component_01_position_x, component_01_position_y, 0]
]
self.title_render(
draw,
config,
component_position,
first_title_position_x,
first_title_position_y,
secord_title_position_x,
secord_title_position_y,
first_title_font,
secord_title_font
)
def title_render(
self,
draw,
config,
component_position,
first_title_position_x,
first_title_position_y,
secord_title_position_x,
secord_title_position_y,
first_title_font,
secord_title_font
):
        # Parse the title and shadow colors
first_title_color = tuple(eval(config["text"][0]["color"]))
secord_title_color = tuple(eval(config["text"][1]["color"]))
shadow = tuple(eval(config["other"]["shadow"]))
        # Load and paste the components
for ikey in component_position:
component_temp = Image.open("cover_generator/components/" + config["component"][ikey[2]]["path"])
self.tb.paste(component_temp, (int(ikey[0]), int(ikey[1])))
draw.text((first_title_position_x + config["other"]["shadow_offset_x"], first_title_position_y + config["other"]["shadow_offset_y"]), self.first_title, font=first_title_font, fill=shadow)
draw.text((first_title_position_x, first_title_position_y), self.first_title, font=first_title_font, fill=first_title_color)
draw.text((secord_title_position_x + config["other"]["shadow_offset_x"], secord_title_position_y + config["other"]["shadow_offset_y"]), self.secord_title, font=secord_title_font, fill=shadow)
draw.text((secord_title_position_x, secord_title_position_y), self.secord_title, font=secord_title_font, fill=secord_title_color)
self.tb.save('cover_generator/transparent_title.png', quality=100)
def traditional(self):
# init
config = self.layout_config["model"]["traditional"]
self.x_model(config)
def simplicity(self):
# init
config = self.layout_config["model"]["simplicity"]
self.x_model(config)
def technology(self):
# init
config = self.layout_config["model"]["techbuddies"]
self.x_model(config)
def simplify(self):
# init
config = self.layout_config["model"]["simplify"]
self.x_model(config)
def humorous(self):
# init
config = self.layout_config["model"]["humorous"]
draw, first_title_font, secord_title_font, first_title_size, secord_title_size = self.model_init(config)
component = Image.open("cover_generator/components/" + config["component"][0]["path"])
        # Align against the widest element
max_size = max(first_title_size[0], secord_title_size[0], component.width)
        # Line spacing
line_space = first_title_size[1] * config["other"]["space"]
first_title_location_x = max_size / 2 - first_title_size[0] / 2
first_title_location_y = 0
component_location_x = max_size / 2 - component.width / 2
component_location_y = line_space
secord_title_location_x = max_size / 2 - secord_title_size[0] / 2
secord_title_location_y = component_location_y + component.height
height = secord_title_location_y + secord_title_size[1]
component_location = [
[component_location_x, component_location_y, 0]
]
self.l_model(draw, config, first_title_location_x, first_title_location_y, component_location, secord_title_location_x, secord_title_location_y, first_title_font, secord_title_font, max_size / 2, height / 2)
def classical(self):
# init
config = self.layout_config["model"]["classical"]
draw, first_title_font, secord_title_font, first_title_size, secord_title_size = self.model_init(config)
component = Image.open("cover_generator/components/" + config["component"][0]["path"])
component_01_location_x = 0
component_01_location_y = 0
component_02_location_x = config["component"][0]["location"][0]
component_02_location_y = config["component"][0]["location"][1]
first_title_location_x = config["component"][0]["offset"][0]
first_title_location_y = config["component"][0]["offset"][1]
secord_title_location_x = config["component"][0]["location"][0] + component.width - config["component"][1]["offset"][0] - secord_title_size[0]
secord_title_location_y = config["component"][0]["location"][1] + component.height - config["component"][1]["offset"][1] - secord_title_size[1]
component_location = [
[component_01_location_x, component_01_location_y, 0],
[component_02_location_x, component_02_location_y, 0]
]
max_size = component_02_location_x + component.width
height = component_02_location_y + component.height
self.l_model(draw, config, first_title_location_x, first_title_location_y, component_location, secord_title_location_x, secord_title_location_y, first_title_font, secord_title_font, max_size / 2, height / 2)
def vertical(self):
# init
config = self.layout_config["model"]["vertical"]
draw, first_title_font, secord_title_font, first_title_size, secord_title_size = self.model_init(config)
component = Image.open("cover_generator/components/" + config["component"][0]["path"])
        # Align against the widest element
max_size = max(first_title_size[0] + secord_title_size[0] + 50, component.width)
component_location_x = 0
component_location_y = 0
first_title_location_x = component.width / 2 - config["component"][0]["offset"][0] - first_title_size[0]
first_title_location_y = component.height / 2 - config["component"][0]["offset"][1] - first_title_size[1]
secord_title_location_x = component.width / 2 + config["component"][1]["offset"][0]
secord_title_location_y = component.height / 2 + config["component"][1]["offset"][1]
height = component.height
component_location = [
[component_location_x, component_location_y, 0]
]
self.l_model(draw, config, first_title_location_x, first_title_location_y, component_location, secord_title_location_x, secord_title_location_y, first_title_font, secord_title_font, max_size / 2, height / 2)
def focus(self):
# init
config = self.layout_config["model"]["focus"]
draw, first_title_font, secord_title_font, first_title_size, secord_title_size = self.model_init(config)
component = Image.open("cover_generator/components/" + config["component"][0]["path"])
        # Line spacing
line_space = first_title_size[1] * config["other"]["space"]
        # Align against the widest element
secord_line_length = 2 * component.width + secord_title_size[0] + config["component"][0]["offset"][0] + config["component"][1]["offset"][0]
max_size = max(secord_line_length, first_title_size[0])
first_title_location_x = (max_size - first_title_size[0]) / 2
first_title_location_y = 0
component_01_location_x = (max_size - secord_line_length) / 2 + config["other"]["line_offset"]
component_01_location_y = line_space
secord_title_location_x = (max_size - secord_line_length) / 2 + component.width + config["component"][0]["offset"][0] + config["other"]["line_offset"]
secord_title_location_y = line_space
component_02_location_x = (max_size - secord_line_length) / 2 + component.width + config["component"][0]["offset"][0] + secord_title_size[0] + config["component"][1]["offset"][0] + config["other"]["line_offset"]
component_02_location_y = line_space
height = max(component.height, secord_title_size[1]) + line_space
component_location = [
[component_01_location_x, component_01_location_y, 0],
[component_02_location_x, component_02_location_y, 0]
]
self.l_model(draw, config, first_title_location_x, first_title_location_y, component_location, secord_title_location_x, secord_title_location_y, first_title_font, secord_title_font, max_size / 2, height / 2)
def fresh(self):
# init
config = self.layout_config["model"]["fresh"]
draw, first_title_font, secord_title_font, first_title_size, secord_title_size = self.model_init(config)
component = Image.open("cover_generator/components/" + config["component"][0]["path"])
        # Line spacing
line_space = first_title_size[1] * config["other"]["space"]
        # Align against the widest element
first_line_length = 2 * component.width + first_title_size[0] + config["component"][0]["offset"][0] + config["component"][1]["offset"][0]
max_size = max(first_line_length, secord_title_size[0])
component_01_location_x = (max_size - first_line_length) / 2
component_01_location_y = config["component"][0]["offset"][1]
first_title_location_x = (max_size - first_line_length) / 2 + component.width + config["component"][0]["offset"][0]
first_title_location_y = 0
component_02_location_x = (max_size - first_line_length) / 2 + component.width + config["component"][0]["offset"][0] + first_title_size[0] + config["component"][1]["offset"][0]
component_02_location_y = config["component"][1]["offset"][1]
secord_title_location_x = (max_size - secord_title_size[0]) / 2 + config["other"]["line_offset"]
secord_title_location_y = line_space
height = secord_title_size[1] + line_space
component_location = [
[component_01_location_x, component_01_location_y, 0],
[component_02_location_x, component_02_location_y, 0]
]
self.l_model(draw, config, first_title_location_x, first_title_location_y, component_location, secord_title_location_x, secord_title_location_y, first_title_font, secord_title_font, max_size / 2, height / 2)
    # Return a position from the layout model
def x_location(self):
        # "4+1" model positions: 0 center, 1 top-left, 2 top-right, 3 bottom-right, 4 bottom-left
        # Pick a random position
location = random.randint(0, 4)
return self.x_model_map[location]
    # Return a position from the layout model
def l_location(self):
        # "3-center" model positions: 0 top-center, 1 center, 2 bottom-center
        # Pick a random position
location = random.randint(0, 2)
return self.l_model_map[location]
    # Return a random font
def random_font(self):
font_list = []
for root, dirs, files in os.walk(self.font_path):
for file in files:
if os.path.splitext(file)[-1] == '.ttf':
font_list.append(file)
return random.choice(font_list)
def run(self, first, secord):
self.first_title = first
self.secord_title = secord
        # Pick a random layout model
model = random.randint(0, 8)
self.structure_map[model]()
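# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal, hedged example of how Structure appears to be driven. It assumes
# the bundled assets exist: cover_generator/layout.json, the font directory
# with at least one .ttf file, and the component images referenced by the
# layout config. The title strings below are placeholders.
if __name__ == "__main__":
    cover = Structure()
    # Renders a randomly chosen layout model and writes the result to
    # cover_generator/transparent_title.png (see title_render above).
    cover.run("Main Title", "Subtitle")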
|
1626746
|
import subprocess
from lib.resp import Resp
from lib import settings, utils
from subprocess import getoutput
from lib.interface.mac_gen import MacGen
class InterfaceBackend:
wlan = None
monitor_mode = "monitor"
managed_mode = "managed"
interfaces = [
settings.SCAN_INTERFACE,
settings.DEAUTH_INTERFACE,
settings.HANDSHAKE_INTERFACE,
settings.EVIL_TWIN_INTERFACE,
]
@staticmethod
def __get_interface(intf):
cmd = f"airmon-ng | grep {intf} | awk 'NR==1{{print $2}}'"
return getoutput(cmd)
@staticmethod
def __disable_interfaces():
for intf in InterfaceBackend.interfaces:
cmd = f"iw dev {intf} del"
getoutput(cmd)
@staticmethod
def __enable_interfaces(intf):
# disable
getoutput(f"ifconfig {intf} down")
InterfaceBackend.__disable_interfaces()
# enable
for iface in InterfaceBackend.interfaces:
mode = (
InterfaceBackend.monitor_mode
if iface != settings.EVIL_TWIN_INTERFACE
else InterfaceBackend.managed_mode
)
cmd = f"iw dev {intf} interface add {iface} type {mode}"
subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).wait()
InterfaceBackend.change_mac(iface, mode, MacGen.generate())
# up interface
getoutput(f"ifconfig {intf} up")
@staticmethod
def __monitor_mode_enabled():
for intf in InterfaceBackend.interfaces:
if not InterfaceBackend.__get_interface(intf):
return False
return True
@staticmethod
def set_monitor_mode(intf):
resp = Resp()
# check if interface exists
if not InterfaceBackend.__get_interface(intf):
resp.msg = "Interface not found"
return resp
        # make sure the provided interface is not one of this tool's virtual interfaces
if intf in [i.lower() for i in InterfaceBackend.interfaces]:
resp.msg = "Invalid interface"
return resp
# check if monitor mode is already set
if InterfaceBackend.__monitor_mode_enabled():
resp.msg = "Monitor mode is already enabled"
resp.status = Resp.SUCCESS_CODE
return resp
# stop processes
utils.kill_all()
utils.stop_services()
# attempt to create interfaces
InterfaceBackend.__enable_interfaces(intf)
# verify
if not InterfaceBackend.__monitor_mode_enabled():
resp.msg = "Failed to enable monitor mode"
return resp
InterfaceBackend.wlan = intf
resp.msg = "Successfully enabled monitor mode"
resp.value = {"interface": intf}
resp.status = Resp.SUCCESS_CODE
return resp
@staticmethod
def disable_interfaces():
if InterfaceBackend.__monitor_mode_enabled():
InterfaceBackend.__disable_interfaces()
@staticmethod
def set_managed_mode():
resp = Resp()
if not InterfaceBackend.__monitor_mode_enabled():
resp.msg = "Monitor mode is not enabled"
return resp
InterfaceBackend.__disable_interfaces()
if InterfaceBackend.__monitor_mode_enabled():
resp.msg = "Failed to disable monitor mode"
return resp
# restart services
utils.restart_services()
InterfaceBackend.wlan = None
resp.msg = "Successfully disabled monitor mode"
resp.status = Resp.SUCCESS_CODE
return resp
@staticmethod
def change_mac(iface, mode, new_mac):
cmd = f"ifconfig {iface} down && iwconfig {iface} mode {mode} && macchanger -m {new_mac} {iface} && ifconfig {iface} up"
subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).wait()
@staticmethod
def status():
resp = Resp()
status = {"monitor-mode": False}
if InterfaceBackend.__monitor_mode_enabled():
status["interface"] = InterfaceBackend.wlan
status["monitor-mode"] = True
resp.value = status
resp.status = Resp.SUCCESS_CODE
return resp
|
1626766
|
from collections import defaultdict
import data_io
def main():
print("Getting features for valid papers from the database")
data = data_io.get_features_db("ValidPaper")
author_paper_ids = [x[:2] for x in data]
features = [x[2:] for x in data]
print("Loading the classifier")
classifier = data_io.load_model()
print("Making predictions")
predictions = classifier.predict_proba(features)[:,1]
predictions = list(predictions)
author_predictions = defaultdict(list)
paper_predictions = {}
for (a_id, p_id), pred in zip(author_paper_ids, predictions):
author_predictions[a_id].append((pred, p_id))
for author_id in sorted(author_predictions):
paper_ids_sorted = sorted(author_predictions[author_id], reverse=True)
paper_predictions[author_id] = [x[1] for x in paper_ids_sorted]
print("Writing predictions to file")
data_io.write_submission(paper_predictions)
if __name__=="__main__":
main()
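# --- Illustration (not part of the original script) ---
# A sketch of the per-author grouping and ranking performed in main(), using
# toy (author_id, paper_id) pairs and made-up probabilities in place of the
# classifier output. It is never called by the script itself.
def _ranking_example():
    author_paper_ids = [(1, 10), (1, 11), (2, 20)]
    predictions = [0.2, 0.9, 0.5]
    author_predictions = defaultdict(list)
    for (a_id, p_id), pred in zip(author_paper_ids, predictions):
        author_predictions[a_id].append((pred, p_id))
    # For each author, papers are listed most-confident first.
    ranked = {a: [p_id for _, p_id in sorted(pairs, reverse=True)]
              for a, pairs in author_predictions.items()}
    assert ranked == {1: [11, 10], 2: [20]}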
|
1626792
|
from __future__ import division
import sys
import kenlm
from marmot.features.feature_extractor import FeatureExtractor
class LMFeatureExtractor(FeatureExtractor):
def __init__(self, lm_file):
self.model = kenlm.LanguageModel(lm_file)
def get_features(self, context_obj):
#sys.stderr.write("Start LMFeatureExtractor\n")
log_prob = self.model.score(' '.join(context_obj['token']), bos=False, eos=False)
tg_len = len(context_obj['token'])
perplexity = 2**((-1/tg_len)*log_prob)
#sys.stderr.write("Finish LMFeatureExtractor\n")
return [str(log_prob), str(perplexity)]
def get_feature_names(self):
return ['target_log_prob', 'target_perplexity']
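# --- Usage sketch (illustrative addition, not part of the original module) ---
# A hedged example of how this extractor is presumably used: 'target.arpa' is
# a placeholder for a KenLM language model file, and the context object only
# needs the 'token' field read by get_features() above.
if __name__ == "__main__":
    extractor = LMFeatureExtractor('target.arpa')  # hypothetical model path
    context = {'token': ['this', 'is', 'a', 'test']}
    print(extractor.get_feature_names())   # ['target_log_prob', 'target_perplexity']
    print(extractor.get_features(context))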
|
1626794
|
from __future__ import unicode_literals
import codecs
import numpy
import os
import transaction
from base64 import b64decode
from hashlib import sha1
import itertools
from pyramid_addons.helpers import (http_created, http_gone, http_ok)
from pyramid_addons.validation import (EmailAddress, Enum, List, Or, String,
RegexString, TextNumber,
WhiteSpaceString, validate, SOURCE_GET,
SOURCE_MATCHDICT as MATCHDICT)
from pyramid.httpexceptions import (HTTPBadRequest, HTTPConflict, HTTPError,
HTTPFound, HTTPNotFound, HTTPOk,
HTTPRedirection, HTTPSeeOther)
from pyramid.response import FileResponse, Response
from pyramid.security import forget, remember
from pyramid.settings import asbool
from pyramid.view import (forbidden_view_config, notfound_view_config,
view_config)
import re
from sqlalchemy.exc import IntegrityError
import yaml
from zipfile import ZipFile
from .diff_render import HTMLDiff
from .exceptions import GroupWithException, InvalidId, SubmitException
from .helpers import (
AccessibleDBThing, DBThing as AnyDBThing, DummyTemplateAttr,
EditableDBThing, TestableStatus, TextDate, ViewableDBThing, UmailAddress,
add_user, clone, fetch_request_ids, file_verifier_verification,
prepare_renderable, prev_next_submission, prev_next_group,
project_file_create, project_file_delete, send_email,
    test_case_verification, zip_response, zip_response_adv)
from .models import (BuildFile, Class, ExecutionFile, File, FileVerifier,
Group, GroupRequest, PasswordReset, Project, Session,
Submission, SubmissionToFile, TestCase, Testable, User,
UserToGroup)
# Hack for old pickle files
# TODO: Migrate this data to not use pickle
import sys
import submit
sys.modules['nudibranch'] = submit
sys.modules['nudibranch.diff_unit'] = submit.diff_unit
sys.modules['nudibranch.models'] = submit.models
# A few recurring validators
OUTPUT_SOURCE = Enum('output_source', 'stdout', 'stderr', 'file')
OUTPUT_TYPE = Enum('output_type', 'diff', 'image', 'text')
SHA1_VALIDATOR = String('sha1sum', min_length=40, max_length=40,
source=MATCHDICT)
UUID_VALIDATOR = String('token', min_length=36, max_length=36,
source=MATCHDICT)
# We need a specific view config for each of HTTPError, HTTPOk, and
# HTTPRedirection as HTTPException will not work as a context. Because python
# has explicit decorators for forbidden and notfound (and we use them) we must
# also use those decorators here.
@forbidden_view_config(xhr=True, renderer='json')
@notfound_view_config(xhr=True, renderer='json')
@view_config(context=HTTPError, xhr=True, renderer='json')
@view_config(context=HTTPOk, xhr=True, renderer='json')
@view_config(context=HTTPRedirection, xhr=True, renderer='json')
def json_exception(context, request):
"""Always return json content in the body of Exceptions to xhr requests."""
request.response.status = context.code
return {'error': context._status, 'messages': context.message}
# Prevent PredicateMismatch exception
@view_config(context=HTTPError)
@view_config(context=HTTPOk)
@view_config(context=HTTPRedirection)
def normal_exception(context, request):
"""Just return the normal context"""
return context
@forbidden_view_config()
def forbidden_view(context, request):
if request.user:
return context
request.session.flash('You must be logged in to do that.', 'warnings')
return HTTPSeeOther(request.route_path('session',
_query={'next': request.path}))
@notfound_view_config()
def not_found(request):
return Response('Not Found', status='404 Not Found')
@view_config(route_name='robots', request_method='GET', http_cache=86400)
def robots(request):
return Response(body='User-agent: *\nDisallow: /\n',
content_type=str('text/plain'))
@view_config(route_name='build_file', request_method='PUT',
permission='authenticated', renderer='json')
@validate(file_=ViewableDBThing('file_id', File),
filename=String('filename', min_length=1),
project=EditableDBThing('project_id', Project))
def build_file_create(request, file_, filename, project):
return project_file_create(request, file_, filename, project, BuildFile)
@view_config(route_name='build_file_item', request_method='DELETE',
permission='authenticated', renderer='json')
@validate(build_file=EditableDBThing('build_file_id', BuildFile,
source=MATCHDICT))
def build_file_delete(request, build_file):
return project_file_delete(request, build_file)
@view_config(route_name='class.admins', renderer='json', request_method='PUT')
@validate(class_=EditableDBThing('class_id', Class, source=MATCHDICT),
user=AnyDBThing('email', User, fetch_by='username',
validator=EmailAddress('email')))
def class_admins_add(request, class_, user):
if user in class_.admins:
raise HTTPConflict('That user is already an admin for the class.')
user.admin_for.append(class_)
try:
Session.flush()
except IntegrityError:
raise HTTPConflict('The user could not be added.')
request.session.flash('Added {} as an admin to the class.'.format(user),
'successes')
return http_ok(request, redir_location=request.url)
@view_config(route_name='class.admins', request_method='GET',
permission='authenticated',
renderer='templates/forms/class_admins.pt')
@validate(class_=EditableDBThing('class_id', Class, source=MATCHDICT))
def class_admins_view(request, class_):
return {'class_': class_}
@view_config(route_name='class', request_method='PUT', permission='admin',
renderer='json')
@validate(name=String('name', min_length=3))
def class_create(request, name):
class_ = Class(name=name)
Session.add(class_)
try:
Session.flush()
except IntegrityError:
raise HTTPConflict('Class \'{0}\' already exists'.format(name))
request.session.flash('Created class {}'.format(name), 'successes')
return http_created(request,
redir_location=request.route_path('class_new'))
@view_config(route_name='class_new', request_method='GET',
renderer='templates/forms/class_create.pt', permission='admin')
def class_edit(request):
return {'classes': sorted(Class.query_by().all())}
@view_config(route_name='class_item', request_method='JOIN',
permission='authenticated', renderer='json')
@validate(class_=AnyDBThing('class_id', Class, source=MATCHDICT))
def class_join(request, class_):
if class_.is_locked:
raise HTTPBadRequest('Invalid class')
request.user.classes.append(class_)
request.session.flash('You have joined {}'.format(class_.name),
'successes')
url = request.route_path('user_item', username=request.user.username)
return http_created(request, redir_location=url)
@view_config(route_name='class_item', request_method='GET',
renderer='templates/class_view.pt', permission='authenticated')
@validate(class_=AnyDBThing('class_id', Class, source=MATCHDICT))
def class_view(request, class_):
class_admin = class_.is_admin(request.user)
recent_subs = None
if class_admin:
project_ids = [x.id for x in class_.projects]
if project_ids:
recent_subs = (Submission.query_by()
.filter(Submission.project_id.in_(project_ids))
.order_by(Submission.created_at.desc()).limit(16)
.all())
return {'class_': class_, 'class_admin': class_admin,
'recent_subs': recent_subs}
@view_config(route_name='execution_file', request_method='PUT',
permission='authenticated', renderer='json')
@validate(file_=ViewableDBThing('file_id', File),
filename=String('filename', min_length=1),
project=EditableDBThing('project_id', Project))
def execution_file_create(request, file_, filename, project):
return project_file_create(request, file_, filename, project,
ExecutionFile)
@view_config(route_name='execution_file_item', request_method='DELETE',
permission='authenticated', renderer='json')
@validate(execution_file=EditableDBThing('execution_file_id', ExecutionFile,
source=MATCHDICT))
def execution_file_delete(request, execution_file):
return project_file_delete(request, execution_file)
@view_config(route_name='file_item', request_method='PUT', renderer='json',
permission='authenticated')
@validate(b64data=WhiteSpaceString('b64data'), sha1sum=SHA1_VALIDATOR)
def file_create(request, b64data, sha1sum):
data = b64decode(b64data.encode('ascii'))
# Verify the sha1 matches
expected_sha1 = sha1(data).hexdigest()
if sha1sum != expected_sha1:
msg = 'sha1sum does not match expected: {0}'.format(expected_sha1)
raise HTTPBadRequest(msg)
# fetch or create (and save to disk) the file
base_path = request.registry.settings['file_directory']
file_ = File.fetch_or_create(data, base_path, sha1sum=sha1sum)
# associate user with the file
request.user.files.add(file_)
return {'file_id': file_.id}
@view_config(route_name='file_item', request_method='INFO',
permission='authenticated', renderer='json')
@validate(file_=ViewableDBThing('sha1sum', File, fetch_by='sha1',
validator=SHA1_VALIDATOR, source=MATCHDICT))
def file_item_info(request, file_):
return {'file_id': file_.id, 'owns_file': file_ in request.user.files}
@view_config(route_name='file_item', request_method='GET',
permission='authenticated', renderer='templates/file_view.pt')
@validate(file_=ViewableDBThing('sha1sum', File, fetch_by='sha1',
validator=SHA1_VALIDATOR, source=MATCHDICT),
filename=String('filename', min_length=1, source=MATCHDICT),
raw=TextNumber('raw', min_value=0, max_value=1,
optional=True, source=SOURCE_GET))
def file_item_view(request, file_, filename, raw):
source = File.file_path(request.registry.settings['file_directory'],
file_.sha1)
if raw:
return FileResponse(source, request)
try:
contents = codecs.open(source, encoding='utf-8').read()
except UnicodeDecodeError as exc:
contents = 'File contents could not be displayed: {}'.format(exc)
return {'contents': contents,
'filename': filename,
'url': request.route_path('file_item', sha1sum=file_.sha1,
filename=filename, _query={'raw': '1'})}
@view_config(route_name='file_verifier', request_method='PUT',
permission='authenticated', renderer='json')
@validate(copy_to_execution=TextNumber('copy_to_execution', min_value=0,
max_value=1, optional=True),
filename=String('filename', min_length=1),
min_size=TextNumber('min_size', min_value=0),
max_size=TextNumber('max_size', min_value=0, optional=True),
min_lines=TextNumber('min_lines', min_value=0),
max_lines=TextNumber('max_lines', min_value=0, optional=True),
optional=TextNumber('optional', min_value=0, max_value=1,
optional=True),
project=EditableDBThing('project_id', Project),
warning_regex=RegexString('warning_regex', optional=True))
@file_verifier_verification
def file_verifier_create(request, copy_to_execution, filename, min_size,
max_size, min_lines, max_lines, optional, project,
warning_regex):
# Check for build-file conflict
if not optional and BuildFile.fetch_by(project=project, filename=filename):
msg = ('A build file already exists with that name. '
'Provide a different name, or mark as optional.')
raise HTTPBadRequest(msg)
filev = FileVerifier(copy_to_execution=bool(copy_to_execution),
filename=filename, min_size=min_size,
max_size=max_size, min_lines=min_lines,
max_lines=max_lines, optional=bool(optional),
project=project, warning_regex=warning_regex)
Session.add(filev)
try:
Session.flush()
except IntegrityError:
raise HTTPConflict('That filename already exists for the project')
request.session.flash('Added expected file: {}'.format(filename),
'successes')
redir_location = request.route_path('project_edit', project_id=project.id)
return http_created(request, redir_location=redir_location)
@view_config(route_name='file_verifier_item', request_method='DELETE',
permission='authenticated', renderer='json')
@validate(file_verifier=EditableDBThing('file_verifier_id', FileVerifier,
source=MATCHDICT))
def file_verifier_delete(request, file_verifier):
return project_file_delete(request, file_verifier)
@view_config(route_name='file_verifier_item', request_method='POST',
permission='authenticated', renderer='json')
@validate(copy_to_execution=TextNumber('copy_to_execution', min_value=0,
max_value=1, optional=True),
file_verifier=EditableDBThing('file_verifier_id', FileVerifier,
source=MATCHDICT),
filename=String('filename', min_length=1),
min_size=TextNumber('min_size', min_value=0),
max_size=TextNumber('max_size', min_value=0, optional=True),
min_lines=TextNumber('min_lines', min_value=0),
max_lines=TextNumber('max_lines', min_value=0, optional=True),
optional=TextNumber('optional', min_value=0, max_value=1,
optional=True),
warning_regex=RegexString('warning_regex', optional=True))
@file_verifier_verification
def file_verifier_update(request, copy_to_execution, file_verifier, filename,
min_size, max_size, min_lines, max_lines, optional,
warning_regex):
# Check for build-file conflict
if not optional and BuildFile.fetch_by(project=file_verifier.project,
filename=filename):
msg = ('A build file already exists with that name. '
'Provide a different name, or mark as optional.')
raise HTTPBadRequest(msg)
if not file_verifier.update(copy_to_execution=bool(copy_to_execution),
filename=filename, min_size=min_size,
max_size=max_size, min_lines=min_lines,
max_lines=max_lines, optional=bool(optional),
warning_regex=warning_regex):
return http_ok(request, message='Nothing to change')
try:
Session.flush()
except IntegrityError:
raise HTTPConflict('That filename already exists for the project')
request.session.flash('Updated expected file: {}'.format(filename),
'successes')
redir_location = request.route_path('project_edit',
project_id=file_verifier.project.id)
return http_ok(request, redir_location=redir_location)
@view_config(route_name='home', request_method='GET')
def home(request):
if request.user:
url = request.route_path('user_item', username=request.user.username)
else:
url = request.route_path('session')
raise HTTPFound(location=url)
@view_config(route_name='password_reset', request_method='PUT',
renderer='json')
@validate(username=EmailAddress('email'))
def password_reset_create(request, username):
if username == 'admin':
raise HTTPConflict('Hahaha, nice try!')
user = User.fetch_by(username=username)
if not user:
raise HTTPConflict('Invalid email')
password_reset = PasswordReset.generate(user)
failure_message = 'You were already sent a password reset email.'
if password_reset:
Session.add(password_reset)
try:
Session.flush()
except IntegrityError:
raise HTTPConflict(failure_message)
site_name = request.registry.settings['site_name']
reset_url = request.route_url('password_reset_item',
token=password_reset.get_token())
body = ('Visit the following link to reset your password:\n\n{0}'
.format(reset_url))
send_email(request, recipients=user.username, body=body,
subject='{0} password reset email'.format(site_name))
return http_ok(request,
message='A password reset link will be emailed to you.')
else:
raise HTTPConflict(failure_message)
@view_config(route_name='password_reset', request_method='GET',
renderer='templates/forms/password_reset.pt')
def password_reset_edit(request):
return {}
@view_config(route_name='password_reset_item', request_method='GET',
renderer='templates/forms/password_reset_item.pt')
@validate(reset=AnyDBThing('token', PasswordReset, fetch_by='reset_token',
validator=UUID_VALIDATOR, source=MATCHDICT))
def password_reset_edit_item(request, reset):
return {'token': reset.get_token()}
@view_config(route_name='password_reset_item', renderer='json',
request_method='PUT')
@validate(username=EmailAddress('email'),
password=WhiteSpaceString('password', min_length=6),
reset=AnyDBThing('token', PasswordReset, fetch_by='reset_token',
validator=UUID_VALIDATOR, source=MATCHDICT))
def password_reset_item(request, username, password, reset):
if reset.user.username != username:
raise HTTPConflict('The reset token and username '
'combination is not valid.')
reset.user.password = password
Session.delete(reset)
Session.flush()
request.session.flash('Your password has been updated!', 'successes')
redir_location = request.route_path('session',
_query={'username': username})
return http_ok(request, redir_location=redir_location)
@view_config(route_name='project', request_method='CLONE',
permission='authenticated', renderer='json')
@validate(class_=EditableDBThing('class_id', Class),
name=String('name', min_length=2),
src_project=ViewableDBThing('project_id', Project))
def project_clone(request, class_, name, src_project):
# Additional check as we can clone projects whose classes are locked,
# but we cannot clone projects that are locked
if src_project.status not in (u'notready', u'ready'):
raise HTTPConflict('Cannot clone a project with status: {}'
.format(src_project.status))
# Build a copy of the project settings
update = {'class_': class_, 'status': 'notready', 'name': name}
project = clone(src_project, ('class_id',), update)
Session.autoflush = False # Don't flush while testing for changes
# Copy project "files" keeping a mapping between src and dst objects
mapping = {'build_files': {}, 'execution_files': {}, 'file_verifiers': {}}
for attr in mapping:
for item in getattr(src_project, attr):
new = clone(item, ('project_id',))
getattr(project, attr).append(new)
mapping[attr][item] = new
# Copy project testables
for src_testable in src_project.testables:
testable = clone(src_testable, ('project_id',))
project.testables.append(testable)
# Set testable "files" with the appropriate "new" file
for attr, file_mapping in mapping.items():
getattr(testable, attr).extend(file_mapping[x] for x
in getattr(src_testable, attr))
# Copy test cases
testable.test_cases = [clone(x, ('testable_id',))
for x in src_testable.test_cases]
Session.add(project)
try:
Session.flush()
except IntegrityError:
raise HTTPConflict('The name `{0}` already exists for the class.'
.format(name))
request.session.flash('Cloned {} {} as {}'.format(src_project.class_.name,
src_project.name,
name),
'successes')
redir_location = request.route_path('project_edit', project_id=project.id)
return http_created(request, redir_location=redir_location)
@view_config(route_name='project', request_method='PUT',
permission='authenticated', renderer='json')
@validate(name=String('name', min_length=2),
class_=EditableDBThing('class_id', Class),
makefile=ViewableDBThing('makefile_id', File, optional=True))
def project_create(request, name, class_, makefile):
project = Project(name=name, class_=class_, makefile=makefile)
Session.add(project)
try:
Session.flush()
except IntegrityError:
raise HTTPConflict('That project name already exists for the class')
redir_location = request.route_path('project_edit', project_id=project.id)
request.session.flash('Project added!', 'successes')
return http_created(request, redir_location=redir_location)
@view_config(route_name='project_item_download', request_method='GET',
permission='authenticated')
@validate(project=ViewableDBThing('project_id', Project, source=MATCHDICT))
def project_download(request, project):
def file_path(file_):
return File.file_path(request.registry.settings['file_directory'],
file_.sha1)
files = []
for sub in project.recent_submissions():
users = sub.group.users_str.replace(' ', '_').replace(',', '-')
user_path = '{0}_{1}'.format(users, sub.id)
for filename, file_ in sub.file_mapping().items():
files.append((os.path.join(project.name, user_path, filename),
file_path(file_)))
return zip_response(request, project.name + '.zip', files)
@view_config(route_name='project_edit',
renderer='templates/forms/project_edit.pt',
request_method='GET', permission='authenticated')
@validate(project=ViewableDBThing('project_id', Project, source=MATCHDICT))
def project_edit(request, project):
action = request.route_path('project_item_summary',
class_id=project.class_.id,
project_id=project.id)
return {'project': project, 'action': action}
def full_fname(fname, project):
return project.name + "/" + fname
@view_config(route_name='project_export',
request_method='GET', permission='authenticated',
renderer='json')
@validate(project=ViewableDBThing('project_id', Project, source=MATCHDICT))
def project_export(request, project):
response = []
base_path = request.registry.settings['file_directory']
response.append(("text", full_fname("README.txt", project), """
Project %s
This is a full copy of the testables and test cases in this project.
It may be imported again using the import feature""" % project.name))
def make_big_string(text, filename):
response.append(("text", full_fname(filename, project), text))
return {
"File": filename
}
project_yml_dict = {}
project_yml_dict["Name"] = project.name
project_yml_dict["ExpectedFiles"] = {
expected_file.filename : {
"CopyToExecution": expected_file.copy_to_execution,
"MinSize": expected_file.min_size,
"MaxSize": expected_file.max_size,
"MinLines": expected_file.min_lines,
"MaxLines": expected_file.max_lines,
"Optional": expected_file.optional,
"WarningRegex": expected_file.warning_regex,
} for expected_file in project.file_verifiers
}
response.append(("text", full_fname("project.yml", project), yaml.safe_dump(project_yml_dict, default_flow_style=False)))
if project.makefile is not None:
response.append(("file", full_fname("Makefile", project), File.file_path(base_path,project.makefile.sha1)))
for buildfile in project.build_files:
response.append(("file", full_fname("build_files/" + buildfile.filename, project), File.file_path(base_path,buildfile.file.sha1)))
for execution in project.execution_files:
response.append(("file", full_fname("execution_files/" + execution.filename, project), File.file_path(base_path,execution.file.sha1)))
for testable in project.testables:
# create a dictionary that will represent the testable
testable_dict = {}
testable_dict["BuildFiles"] = [file.filename for file in testable.build_files]
testable_dict["ExecutionFiles"] = [file.filename for file in testable.execution_files]
testable_dict["ExpectedFiles"] = [file.filename for file in testable.file_verifiers]
testable_dict["MakeTarget"] = testable.make_target
testable_dict["Executable"] = testable.executable
testable_dict["IsHidden"] = testable.is_hidden
testable_dict["TestCases"] = {}
for test_case in testable.test_cases:
# this is the basepath where we will write out long text objects if necessary
testcase_basepath = ("testables/%s/%s" % (testable.name, test_case.name))
# create a dict to hold the information for the test case!
tc_dict = {}
tc_dict["HideExpected"] = test_case.hide_expected
tc_dict["Points"] = test_case.points
tc_dict["Command"] = test_case.args
if test_case.stdin != None:
with open(File.file_path(base_path,test_case.stdin.sha1), 'r') as fin:
tc_dict["Input"] = make_big_string(fin.read(), testcase_basepath + ".stdin")
if test_case.expected != None:
with open(File.file_path(base_path,test_case.expected.sha1), 'r') as fout:
tc_dict["Output"] = make_big_string(fout.read(), testcase_basepath + "." + test_case.source)
tc_dict["Output"]["Source"] = test_case.source
if (tc_dict["Output"]["Source"] == "file"):
tc_dict["Output"]["Source"] = test_case.output_filename
testable_dict["TestCases"][test_case.name] = tc_dict
response.append((
"text",
full_fname("testables/%s/%s.yml" % (testable.name,testable.name), project),
yaml.safe_dump(testable_dict, default_flow_style=False)
))
return zip_response_adv(request, project.name + ".zip", response)
@view_config(route_name='project_import', request_method='POST',
permission='authenticated', renderer='json')
@validate(project=EditableDBThing('project_id', Project, source=MATCHDICT))
# @validate(file=ViewableDBThing('makefile_id', File, optional=False),
# project=EditableDBThing('project_id', Project, source=MATCHDICT))
def project_import(request, project):
import_filename = request.POST['file'].filename
import_file = request.POST['file'].file
# create a file in the backing filesystem for each file in the zip archive!
base_path = request.registry.settings['file_directory']
with ZipFile(import_file,"r") as myzip:
        # upload every file we were given to the backing store... this may not actually be the best approach
submit_files = {path.strip("/") : File.fetch_or_create(myzip.read(path), base_path) for path in myzip.namelist()}
#return myzip.namelist()
file_list = sorted([path for path,v in submit_files.iteritems()])
# we now clear out all of the old testables
        # TODO: back these up to a temporary location before removing them in case errors are encountered!
# alternatively only allow imports on empty projects ?
#project.testables[:] = []
class Filesystem(object):
#paths need to have trailing slashes
def __init__(self, file):
self._file = file
self._files = file.namelist()
def listdir(self, path):
pathlen = len(path)
files = [fname[pathlen:] for fname in self._files if fname.startswith(path)]
files = [fname for fname in files if '/' not in fname or fname.index('/') == len(fname)-1]
return files
def findroot(self,path=""):
folders = [folder for folder in self.listdir(path) if folder[-1:] == '/' and '__MAC' not in folder]
print(folders)
if ("project.yml" not in self.listdir(path)) and (len(folders) == 1):
return self.findroot(folders[0])
elif "project.yml" in self.listdir(path):
return path
else:
raise SubmitException("Failed to find project.yml in root")
def build_file_tree(dirlist, fullpath=""):
dirs = filter(lambda x: "/" in x, dirlist)
files = filter(lambda x: "/" not in x, dirlist)
return dict({
dirname : build_file_tree([ x[x.index("/")+1:] for x in dirlist ], fullpath=fullpath + "/" + dirname)
for dirname, dirlist in itertools.groupby(dirs, lambda x: x[0 : x.index("/")])
}, **{
file : fullpath + "/" + file
for file in files
})
#need to refactor out submit files
#creating for makefile
def get_or_create_file(input, rootdir = "/"):
t = type(input)
if t == str:
return File.fetch_or_create(input, base_path)
if t == dict:
if "File" in input:
fpath = (os.path.join(rootdir, str(input["File"]))).strip("/")
if fpath not in submit_files:
raise SubmitException("File %s not found in project.zip" % fpath)
else:
return submit_files[fpath]
elif "Text" in input:
                    return File.fetch_or_create(input["Text"], base_path)
raise SubmitException("Failed to load a file from the key")
#creating for expected files, testables, and buildfiles
#the root_dir should contain the yml, testables, makefile, execution files, and build files
try:
fs = Filesystem(myzip)
root_dir = fs.listdir("")
try:
root_dir = fs.findroot()
except SubmitException as error:
raise SubmitException("Encountered excpetion: " + str(error) + " while finding project.yml")
print("Testables", fs.listdir(os.path.join(root_dir, "testables")))
if len(fs.listdir(os.path.join(root_dir, "testables"))) == 0:
request.session.flash("Nonfatal exception 'no testables defined' was encountered. Continuing", 'errors')
project_yml = yaml.safe_load(myzip.read(root_dir + "project.yml").decode('utf-8'))
#"importing" expected files
try:
if "ExpectedFiles" in project_yml:
project.expected_files = []
for file in project_yml["ExpectedFiles"]:
expect_file = FileVerifier(
filename = file,
copy_to_execution = project_yml["ExpectedFiles"][file]["CopyToExecution"],
min_size = project_yml["ExpectedFiles"][file]["MinSize"],
max_size = project_yml["ExpectedFiles"][file]["MaxSize"],
min_lines = project_yml["ExpectedFiles"][file]["MinLines"],
max_lines = project_yml["ExpectedFiles"][file]["MaxLines"],
optional = project_yml["ExpectedFiles"][file]["Optional"],
project_id = project.id,
warning_regex = project_yml["ExpectedFiles"][file]["WarningRegex"]
)
Session.add(expect_file)
project.expected_files.append(expect_file)
                else:
                    print("project.yml does not define any ExpectedFiles")
except SubmitException as error:
raise SubmitException("Encountered exception: " + str(error) + " while processing Expected FIles")
#importing makefile
try:
if "Makefile" in project_yml:
project.makefile = get_or_create_file(project_yml["Makefile"], rootdir=root_dir)
except SubmitException as error:
raise SubmitException("Encountered exception: " + str(error) + " while processing Makefile")
#import execution files
try:
execution_dir = os.path.join(root_dir,"execution_files/")
project.execution_files = []
for file in fs.listdir(execution_dir):
if file:
file_obj = File.fetch_or_create(myzip.read(os.path.join(execution_dir, file)), base_path)
exec_file = ExecutionFile(
project = project,
file = file_obj,
filename = file
)
Session.add(exec_file)
project.execution_files.append(exec_file)
else:
print("file: %s is empty" % file)
            except Exception as error:
                raise SubmitException("Encountered exception: " + str(error) + " while adding execution files")
#importing build files
build_dir = os.path.join(root_dir,"build_files/")
project.build_files = []
for file in fs.listdir(build_dir):
if file:
print("appending %s" % file)
file_obj = File.fetch_or_create(myzip.read(os.path.join(build_dir, file)), base_path)
build_file = BuildFile(
project = project,
file = file_obj,
filename = file
)
Session.add(build_file)
project.build_files.append(build_file)
else:
print("file: %s is empty" % file)
#importing testables
try:
testables_dir = os.path.join(root_dir,"testables/")
project.testables = []
for testable_folder in fs.listdir(testables_dir):
if "/" in testable_folder:
print(testable_folder)
testable_yml = yaml.safe_load(myzip.read(os.path.join(testables_dir,testable_folder) + ("%s.yml" % testable_folder.strip("/"))).decode('utf-8'))
testable_file = Testable(
#build_files = testable_yml["BuildFiles"],
executable = testable_yml["Executable"],
#execution_files = testable_yml["ExecutionFiles"],
#file_verifiers = testable_yml["ExpectedFiles"],
is_hidden = testable_yml["IsHidden"],
make_target = testable_yml["MakeTarget"],
name = testable_folder.strip("/"),
project_id = project.id
)
#print(testable_yml["BuildFiles"])
testable_file.test_cases = []
if "TestCases" in testable_yml:
for test_case_name in testable_yml["TestCases"]:
test_cases = TestCase(
args = testable_yml["TestCases"][test_case_name]["Args"],
expected = get_or_create_file(testable_yml["TestCases"][test_case_name]["STDOUT"], rootdir=testable_folder),
hide_expected = testable_yml["TestCases"][test_case_name]["HideExpected"],
name = test_case_name,
points = testable_yml["TestCases"][test_case_name]["Points"],
stdin = get_or_create_file(testable_yml["TestCases"][test_case_name]["STDIN"], rootdir=testable_folder)
)
Session.add(test_cases)
testable_file.test_cases.append(test_cases)
testable_file.build_files = []
if "BuildFiles" in testable_yml:
for files in testable_yml["BuildFiles"]:
build_file = BuildFile.fetch_by(project=project, filename=files)
Session.add(build_file)
testable_file.build_files.append(build_file)
testable_file.execution_files = []
if "ExecutionFiles" in testable_yml:
for files in testable_yml["ExecutionFiles"]:
execution_file = ExecutionFile.fetch_by(project=project, filename=files)
Session.add(execution_file)
testable_file.execution_files.append(execution_file)
testable_file.file_verifiers = []
if "ExpectedFiles" in testable_yml:
for files in testable_yml["ExpectedFiles"]:
expected_file = FileVerifier.fetch_by(project=project, filename=files)
Session.add(expected_file)
testable_file.file_verifiers.append(expected_file)
Session.add(testable_file)
project.testables.append(testable_file)
except SubmitException as error:
raise SubmitException("Encountered exception while adding testables")
#return project_yml
except SubmitException as error:
request.session.flash("Error: " + str(error), 'errors')
try:
Session.flush()
except IntegrityError:
        raise HTTPConflict('The imported data conflicts with existing project records')
redir_location = request.route_path('project_edit', project_id=project.id)
return http_ok(request, redir_location=redir_location)
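# Filesystem.findroot (defined inside project_import above) is what lets the
# importer accept archives wrapped in a single top-level folder. A sketch of
# that behaviour, assuming a zip whose namelist includes directory entries:
#
#   namelist: myproj/, myproj/project.yml, myproj/testables/, ...
#
#   findroot("")        -> no project.yml at the root and exactly one folder
#                          ("__MAC*" folders are ignored), so recurse
#   findroot("myproj/") -> project.yml is listed here, so "myproj/" is returned
#
# If no level ever contains project.yml, a SubmitException is raised instead.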
#expected files are instances of file verifiers
# def testable_create(request, name, is_hidden, make_target, executable,
# build_file_ids, execution_file_ids, file_verifier_ids,
# project):
# if make_target and not project.makefile:
# msg = 'make_target cannot be specified without a make file'
# raise HTTPBadRequest(msg)
# try:
# # Verify the ids actually exist and are associated with the project
# build_files = fetch_request_ids(build_file_ids, BuildFile,
# 'build_file_id',
# project.build_files)
# execution_files = fetch_request_ids(execution_file_ids, ExecutionFile,
# 'execution_file_id')
# file_verifiers = fetch_request_ids(file_verifier_ids, FileVerifier,
# 'file_verifier_id',
# project.file_verifiers)
# except InvalidId as exc:
# raise HTTPBadRequest('Invalid {0}'.format(exc.message))
# testable = Testable(name=name, is_hidden=bool(is_hidden),
# make_target=make_target,
# executable=executable, project=project,
# build_files=build_files,
# execution_files=execution_files,
# file_verifiers=file_verifiers)
# redir_location = request.route_path('project_edit', project_id=project.id)
# Session.add(testable)
# try:
# Session.flush()
# except IntegrityError:
# raise HTTPConflict('That name already exists for the project')
# return http_created(request, redir_location=redir_location,
# testable_id=testable.id)
# @view_config(route_name='project_item_summary', request_method='POST',
# permission='authenticated', renderer='json')
# @validate(name=String('name', min_length=2),
# makefile=ViewableDBThing('makefile_id', File, optional=True),
# is_ready=TextNumber('is_ready', min_value=0, max_value=1,
# optional=True),
# deadline=TextDate('deadline', optional=True),
# delay_minutes=TextNumber('delay_minutes', min_value=1),
# group_max=TextNumber('group_max', min_value=1),
# project=EditableDBThing('project_id', Project, source=MATCHDICT))
# def project_update(request, name, makefile, is_ready, deadline, delay_minutes,
# group_max, project):
# # Fix timezone if it doesn't exist
# if project.deadline and deadline and not deadline.tzinfo:
# deadline = deadline.replace(tzinfo=project.deadline.tzinfo)
# if not project.update(name=name, makefile=makefile, deadline=deadline,
# delay_minutes=delay_minutes,
# group_max=group_max,
# status=u'ready' if bool(is_ready) else u'notready'):
# return http_ok(request, message='Nothing to change')
# try:
# Session.flush()
# except IntegrityError:
# raise HTTPConflict('That project name already exists for the class')
# request.session.flash('Project updated', 'successes')
# redir_location = request.route_path('project_edit', pro
@view_config(route_name='project_group', request_method='JOIN',
permission='authenticated', renderer='json')
@validate(project=EditableDBThing('project_id', Project, source=MATCHDICT),
users=List('user_ids', ViewableDBThing('', User), min_elements=2,
max_elements=2))
def project_group_admin_join(request, project, users):
try:
group = users[0].group_with(users[1], project, bypass_limit=True)
except GroupWithException as exc:
request.session.flash(exc.args[0], 'errors')
group = None
try:
Session.flush()
except IntegrityError:
raise HTTPConflict('Could not join the users at this time.')
redir_location = request.route_path('group_admin', project_id=project.id)
if not group:
return http_gone(request, redir_location=redir_location)
request.session.flash('Made group: {}'.format(group.users_str),
'successes')
redir_location = request.route_path('group_admin', project_id=project.id)
return http_ok(request, redir_location=redir_location)
@view_config(route_name='group_admin', request_method='GET',
renderer='templates/forms/group_admin.pt',
permission='authenticated')
@validate(project=EditableDBThing('project_id', Project, source=MATCHDICT))
def project_group_admin_view(request, project):
students = set(project.class_.users)
selectable = []
for group in project.groups:
students = students - set(group.users)
selectable.append((group.users_str, group.group_assocs[0].user.id))
selectable.extend((x.name, x.id) for x in students)
return {'project': project, 'selectable': selectable}
@view_config(route_name='project_group_item', renderer='json',
request_method='PUT')
@validate(project=AccessibleDBThing('project_id', Project, source=MATCHDICT),
group_request=EditableDBThing('group_request_id', GroupRequest,
source=MATCHDICT))
def project_group_request_confirm(request, project, group_request):
try:
request.user.group_with(group_request.from_user, project)
failed = False
except GroupWithException as exc:
request.session.flash(exc.args[0], 'errors')
failed = True
Session.delete(group_request)
try:
Session.flush()
except IntegrityError:
raise HTTPConflict('Could not join the group at this time.')
url = request.route_url('project_group', project_id=project.id)
if failed:
return http_gone(request, redir_location=url)
request.session.flash('Joined group with {}'
.format(group_request.from_user), 'successes')
return http_ok(request, redir_location=url)
@view_config(route_name='project_group', renderer='json', request_method='PUT')
@validate(project=AccessibleDBThing('project_id', Project, source=MATCHDICT),
username=EmailAddress('email'))
def project_group_request_create(request, project, username):
if not request.user.can_join_group(project):
raise HTTPConflict('You cannot expand your group for this project.')
user = User.fetch_by(username=username)
if not user or project.class_ not in user.classes:
raise HTTPConflict('Invalid email.')
if not user.can_join_group(project):
raise HTTPConflict('That user cannot join your group.')
self_assoc = request.user.fetch_group_assoc(project)
user_assoc = user.fetch_group_assoc(project)
if request.user == user or \
self_assoc == user_assoc and self_assoc is not None:
raise HTTPConflict('You are already in a group with that student.')
Session.add(GroupRequest(from_user=request.user, project=project,
to_user=user))
try:
Session.flush()
except IntegrityError:
raise HTTPConflict('Could not create your group request.')
site_name = request.registry.settings['site_name']
url = request.route_url('project_group', project_id=project.id)
body = ('Your fellow {} student, {}, has requested you join their '
'group for "{}". Please visit the following link to confirm or '
'deny the request:\n\n{}'
.format(project.class_.name, request.user, project.name, url))
send_email(request, recipients=user.username, body=body,
subject='{}: {} "{}" Group Request'
.format(site_name, project.class_.name, project.name))
request.session.flash('Request to {} sent via email.'.format(user),
'successes')
return http_ok(request, redir_location=request.url)
@view_config(route_name='project_group_item', renderer='json',
request_method='DELETE')
@validate(project=AccessibleDBThing('project_id', Project, source=MATCHDICT),
group_request=AccessibleDBThing('group_request_id', GroupRequest,
source=MATCHDICT))
def project_group_request_delete(request, project, group_request):
if request.user == group_request.from_user:
msg = 'Revoked request to {}.'.format(group_request.to_user)
else:
msg = 'Denied request from {}.'.format(group_request.from_user)
Session.delete(group_request)
request.session.flash(msg, 'successes')
url = request.route_url('project_group', project_id=project.id)
return http_ok(request, redir_location=url)
@view_config(route_name='project_group', request_method='GET',
renderer='templates/forms/project_group.pt',
permission='authenticated')
@validate(project=AccessibleDBThing('project_id', Project, source=MATCHDICT))
def project_group_view(request, project):
assoc = request.user.fetch_group_assoc(project)
members = assoc.group.users_str if assoc else request.user.name
pending = GroupRequest.query_by(project=project, to_user=request.user)
requested = GroupRequest.query_by(from_user=request.user,
project=project).first()
can_join = request.user.can_join_group(project)
return {'project': project, 'members': members, 'can_join': can_join,
'pending': pending.all(), 'requested': requested}
@view_config(route_name='project_info', request_method='GET',
permission='authenticated', renderer='json')
@validate(project=EditableDBThing('project_id', Project, source=MATCHDICT))
def project_info(request, project):
retval = {'id': project.id, 'name': project.name, 'testables': {}}
for testable in project.testables:
test_cases = {}
for test_case in testable.test_cases:
stdin = test_case.stdin.sha1 if test_case.stdin else None
expected = test_case.expected.sha1 if test_case.expected else None
test_cases[test_case.name] = {
'id': test_case.id, 'args': test_case.args,
'source': test_case.source,
'stdin': stdin, 'expected': expected,
'output_type': test_case.output_type,
'output_filename': test_case.output_filename}
retval['testables'][testable.name] = {'id': testable.id,
'test_cases': test_cases}
return retval
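# A rough sketch of the JSON returned by project_info, with hypothetical names
# and ids used purely for illustration (stdin/expected hold file sha1s or null):
#
#   {"id": 7, "name": "pa1",
#    "testables": {"t1": {"id": 3,
#                         "test_cases": {"case1": {"id": 11,
#                                                  "args": "a.out < input",
#                                                  "source": "...",
#                                                  "stdin": "<sha1 or null>",
#                                                  "expected": "<sha1 or null>",
#                                                  "output_type": "...",
#                                                  "output_filename": null}}}}}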
@view_config(route_name='project_new', request_method='GET',
renderer='templates/forms/project_new.pt',
permission='authenticated')
@validate(class_=EditableDBThing('class_id', Class, source=MATCHDICT))
def project_new(request, class_):
dummy_project = DummyTemplateAttr(None)
dummy_project.class_ = class_
clone_projects = []
for other in sorted(request.user.admin_for):
clone_projects.extend(other.projects)
return {'project': dummy_project, 'clone_projects': clone_projects}
@view_config(route_name='project_edit', renderer='json',
request_method='PUT', permission='authenticated')
@validate(project=EditableDBThing('project_id', Project, source=MATCHDICT))
def project_requeue(request, project):
count = 0
for count, submission in enumerate(project.recent_submissions(), start=1):
request.queue(submission_id=submission.id, _priority=2)
if count == 0:
return http_ok(request, message='There are no submissions to requeue.')
request.session.flash('Requeued the most recent submissions ({0} items).'
.format(count), 'successes')
return http_ok(request, redir_location=request.url)
@view_config(route_name='project_scores', request_method='GET',
permission='authenticated')
@validate(project=EditableDBThing('project_id', Project, source=MATCHDICT))
def project_scores(request, project):
rows = ['Name, Email, Group ID, Score (On Time), Score']
_, best_ontime, best = project.process_submissions()
for group, (sub, points) in best.items():
on_time = best_ontime[group][1] if group in best_ontime else ''
for user in group.users:
            rows.append('{}, {}, {}, {}, {}'
                        .format(user.name, user.username, group.id,
                                on_time, points))
disposition = 'attachment; filename="{0}.csv"'.format(project.name)
return Response(body='\n'.join(rows), content_type=str('text/csv'),
content_disposition=disposition)
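# project_scores emits one CSV row per group member; a hypothetical two-person
# group (names, ids, and scores invented for illustration) would come out as:
#
#   Name, Email, Group ID, Score (On Time), Score
#   Alice Smith, alice@example.edu, 42, 80, 95
#   Bob Jones, bob@example.edu, 42, 80, 95
#
# The on-time column is left blank for groups whose submissions all arrived
# after the deadline.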
@view_config(route_name='submission_item_gen', renderer='json',
request_method='PUT', permission='authenticated')
@validate(submission=EditableDBThing('submission_id', Submission,
source=MATCHDICT))
def project_test_case_generate(request, submission):
project = submission.project
if project.status == u'locked':
raise HTTPConflict('The project is already locked.')
# Verify the submission is okay to use
if not submission.verification_results:
raise HTTPConflict('The submission has not been verified.')
if submission.testables_pending():
raise HTTPConflict('The submission has pending test groups.')
# Look for testables with issues
by_testable = {x.testable: x for x in submission.testable_results}
for testable in submission.project.testables:
if TestableStatus(testable, by_testable.get(testable),
submission.verification_results.errors).issue:
raise HTTPConflict('The submission contains failing test groups.')
# Mark the project and its testables as locked
project.status = u'locked'
for testable in project.testables:
testable.is_locked = True
# Saved attributes
submission_id = submission.id
project_id = project.id
try:
transaction.commit() # Need to commit before queuing the job.
except IntegrityError:
transaction.abort()
raise
# Schedule a task to generate the expected outputs
request.queue(submission_id=submission_id, update_project=True,
_priority=0)
request.session.flash('Rebuilding the project\'s expected outputs.',
'successes')
redir_location = request.route_url('project_edit', project_id=project_id)
return http_ok(request, redir_location=redir_location)
@view_config(route_name='project_item_summary', request_method='POST',
permission='authenticated', renderer='json')
@validate(name=String('name', min_length=2),
makefile=ViewableDBThing('makefile_id', File, optional=True),
is_ready=TextNumber('is_ready', min_value=0, max_value=1,
optional=True),
deadline=TextDate('deadline', optional=True),
delay_minutes=TextNumber('delay_minutes', min_value=1),
group_max=TextNumber('group_max', min_value=1),
project=EditableDBThing('project_id', Project, source=MATCHDICT))
def project_update(request, name, makefile, is_ready, deadline, delay_minutes,
group_max, project):
# Fix timezone if it doesn't exist
if project.deadline and deadline and not deadline.tzinfo:
deadline = deadline.replace(tzinfo=project.deadline.tzinfo)
if not project.update(name=name, makefile=makefile, deadline=deadline,
delay_minutes=delay_minutes,
group_max=group_max,
status=u'ready' if bool(is_ready) else u'notready'):
return http_ok(request, message='Nothing to change')
try:
Session.flush()
except IntegrityError:
raise HTTPConflict('That project name already exists for the class')
request.session.flash('Project updated', 'successes')
redir_location = request.route_path('project_edit', project_id=project.id)
return http_ok(request, redir_location=redir_location)
@view_config(route_name='project_item_detailed',
request_method=('GET', 'HEAD'),
renderer='templates/project_view_detailed.pt',
permission='authenticated')
@validate(project=AccessibleDBThing('project_id', Project, source=MATCHDICT),
group=ViewableDBThing('group_id', Group, source=MATCHDICT))
def project_view_detailed(request, project, group):
submissions = Submission.query_by(project=project, group=group)
if not submissions:
raise HTTPNotFound()
project_admin = project.can_view(request.user)
if project_admin:
prev_group, next_group = prev_next_group(project, group)
else:
prev_group = next_group = None
return {'project': project,
'project_admin': project_admin,
'is_member': request.user in group.users,
'users_str': group.users_str,
'can_edit': project_admin,
'prev_group': prev_group,
'next_group': next_group,
'submissions': sorted(submissions,
key=lambda s: s.created_at,
reverse=True)}
@view_config(route_name='project_item_detailed_user',
renderer='templates/project_view_detailed.pt',
request_method=('GET', 'HEAD'),
permission='authenticated')
@validate(project=AccessibleDBThing('project_id', Project, source=MATCHDICT),
user=ViewableDBThing('username', User, fetch_by='username',
validator=String('username'), source=MATCHDICT))
def project_view_detailed_user(request, project, user):
group_assoc = user.fetch_group_assoc(project)
if group_assoc:
url = request.route_path('project_item_detailed',
project_id=project.id,
group_id=group_assoc.group_id)
raise HTTPFound(location=url)
return {'project': project,
'project_admin': False,
'is_member': request.user == user,
'users_str': user.name,
'can_edit': False,
'prev_group': None,
'next_group': None,
'submissions': []}
@view_config(route_name='project_item_summary', request_method=('GET', 'HEAD'),
renderer='templates/project_view_summary.pt',
permission='authenticated')
@validate(project=ViewableDBThing('project_id', Project, source=MATCHDICT))
def project_view_summary(request, project):
# Compute student stats
by_group, best_ontime, best = project.process_submissions()
possible = project.points_possible(include_hidden=True)
if best:
best_scores = numpy.array([x[1] for x in best.values()])
normed = [min(x[1], possible) for x in best.values()]
max_score = max(best_scores)
mean = numpy.mean(best_scores)
median = numpy.median(best_scores)
bins = [x * possible for x in [0, 0, .6, .7, .8, .9, 1, 1]]
bins[1] = min(1, bins[2])
hist, _ = numpy.histogram(normed, range=(0, possible), bins=bins)
else:
hist = max_score = mean = median = None
# Find most recent for each group
submissions = {}
group_truncated = set()
for group in project.groups:
if group in by_group:
newest = by_group[group][:-4:-1]
if group in best:
best[group][0]._is_best = True
if best[group][0] not in newest:
newest.append(best[group][0])
if group in best_ontime:
best_ontime[group][0]._is_best = True
if best_ontime[group][0] not in newest:
newest.append(best_ontime[group][0])
if len(newest) < len(by_group[group]):
group_truncated.add(group)
submissions[group] = newest
else:
submissions[group] = []
# The 16 most recent submissions
recent_submissions = (Submission.query_by(project=project)
.order_by(Submission.created_at.desc())
.limit(16).all())
return {'group_truncated': group_truncated,
'hist': hist,
'max': max_score,
'mean': mean,
'median': median,
'num_groups': len(best),
'project': project,
'recent_submissions': recent_submissions,
'submissions': sorted(submissions.items())}
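# The histogram bins computed in project_view_summary are easier to read with a
# concrete number plugged in. This small helper is purely illustrative (it is
# not used by any view) and simply mirrors the bin construction above.
def _example_histogram_bins(possible):
    """Return the score-histogram bin edges for a project worth `possible` points.

    For possible=100 this yields [0, 1, 60, 70, 80, 90, 100, 100]: a bucket for
    zero scores, one for nonzero-but-failing scores, 10%-wide buckets from 60%
    upward, and a final bucket that catches only perfect (clamped) scores.
    """
    bins = [x * possible for x in [0, 0, .6, .7, .8, .9, 1, 1]]
    bins[1] = min(1, bins[2])
    return bins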
@view_config(route_name='session', request_method='PUT', renderer='json')
@validate(username=Or('email', EmailAddress(''), String('')),
password=WhiteSpaceString('password', min_length=6),
next_path=String('next', optional=True))
def session_create(request, username, password, next_path):
development_mode = asbool(request.registry.settings.get('development_mode',
False))
user = User.login(username, password, development_mode=development_mode)
if not user:
raise HTTPConflict('Invalid login')
headers = remember(request, user.id)
request.session.flash('Welcome {}!'.format(user.name), 'successes')
url = next_path or request.route_path('user_item', username=user.username)
return http_created(request, headers=headers, redir_location=url)
@view_config(route_name='session', request_method='DELETE', renderer='json',
permission='authenticated')
def session_destroy(request):
headers = forget(request)
return http_gone(request, headers=headers,
redir_location=request.route_path('home'))
@view_config(route_name='session', request_method='GET',
renderer='templates/forms/login.pt')
@validate(username=String('username', optional=True, source=SOURCE_GET),
next_path=String('next', optional=True, source=SOURCE_GET))
def session_edit(request, username, next_path):
next_path = next_path or request.route_url('home')
return {'next': next_path, 'username': username}
@view_config(route_name='submission', renderer='json', request_method='PUT',
permission='authenticated')
@validate(project=AccessibleDBThing('project_id', Project),
file_ids=List('file_ids', TextNumber('', min_value=0),
min_elements=1),
filenames=List('filenames', String('', min_length=1),
min_elements=1))
def submission_create(request, project, file_ids, filenames):
# Additional input verification
filename_set = set(filenames)
if len(filename_set) != len(filenames):
raise HTTPBadRequest('A filename cannot be provided more than once')
elif len(file_ids) != len(filenames):
msg = 'Number of file_ids must match number of filenames'
raise HTTPBadRequest(msg)
# Verify there are no extra files
extra = filename_set - set(x.filename for x in project.file_verifiers)
if extra:
raise HTTPBadRequest('Invalid files: {}'.format(', '.join(extra)))
# Verify user permission on files
msgs = []
user_files = {x.id: x for x in request.user.files}
files = set()
for i, file_id in enumerate(file_ids):
if file_id in user_files:
files.add(user_files[file_id])
else:
msgs.append('Invalid file "{0}"'.format(filenames[i]))
if msgs:
raise HTTPBadRequest(msgs)
submission = request.user.make_submission(project)
# Grant the files' permissions to the other members of the group
for user in submission.group.users:
if user == request.user:
continue
user.files.update(files)
# Associate the files with the submissions by their submission name
assoc = []
for file_id, filename in zip(file_ids, filenames):
assoc.append(SubmissionToFile(file_id=file_id, filename=filename))
submission.files.extend(assoc)
Session.add(submission)
Session.add_all(assoc)
Session.flush()
submission_id = submission.id
# We must commit the transaction before queueing the job.
transaction.commit()
request.queue(submission_id=submission_id)
# Redirect to submission result page
redir_location = request.route_path('submission_item',
submission_id=submission_id)
return http_created(request, redir_location=redir_location)
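# submission_create expects file_ids and filenames as parallel lists describing
# previously uploaded files; a hypothetical request might carry:
#
#   file_ids:  [412, 413]
#   filenames: ['main.cpp', 'input_parser.cpp']
#
# The two lists must be the same length, a filename may appear only once, every
# filename must match one of the project's expected files, and every file id
# must belong to the submitting user.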
@view_config(route_name='submission_new', request_method='GET',
renderer='templates/forms/submission_new.pt',
permission='authenticated')
@validate(project=AccessibleDBThing('project_id', Project, source=MATCHDICT))
def submission_new(request, project):
return {'project': project,
'submit_path': request.registry.settings['submit_path']}
@view_config(route_name='submission_item', renderer='json',
request_method='PUT', permission='authenticated')
@validate(submission=EditableDBThing('submission_id', Submission,
source=MATCHDICT))
def submission_requeue(request, submission):
request.queue(submission_id=submission.id, _priority=0)
request.session.flash('Requeued the submission', 'successes')
return http_ok(request, redir_location=request.url)
@view_config(route_name='submission_item', request_method='GET',
renderer='templates/submission_view.pt',
permission='authenticated')
@validate(submission=ViewableDBThing('submission_id', Submission,
source=MATCHDICT),
as_user=TextNumber('as_user', min_value=0, max_value=1,
optional=True, source=SOURCE_GET))
def submission_view(request, submission, as_user):
actual_admin = submission.project.can_edit(request.user)
submission_admin = not bool(as_user) and actual_admin
if not submission_admin: # Only check delay for user view
delay = submission.get_delay(
update=request.user in submission.group.users)
if delay:
request.override_renderer = 'templates/submission_delay.pt'
files = {x.filename: x.file for x in submission.files}
prev_sub, next_sub = prev_next_submission(submission)
return {'delay': '{0:.1f} minutes'.format(delay),
'files': files,
'next_sub': next_sub,
'prev_sub': prev_sub,
'submission': submission,
'submission_admin': actual_admin}
points_possible = submission.project.points_possible(
include_hidden=submission_admin)
if submission_admin:
diff_renderer = HTMLDiff(num_reveal_limit=None,
points_possible=points_possible)
else:
diff_renderer = HTMLDiff(points_possible=points_possible)
for tcr in submission.test_case_results:
if submission_admin or not tcr.test_case.testable.is_hidden:
diff_renderer.add_renderable(prepare_renderable(request, tcr,
submission_admin))
if submission.verification_results:
mapping = submission.file_mapping()
extra_files = {x: mapping[x] for x in
submission.verification_results.extra_filenames}
files = {x.filename: x.file for x in submission.files
if x.filename not in extra_files}
warnings = submission.verification_results.warnings
pending = submission.testables_pending(prune=not submission_admin)
# Build all testables' statuses
# Testables that failed verification do not have a TestableResult
by_testable = {x.testable: x for x in submission.testable_results}
testable_issues = []
# Add testables which have issues (verification or build)
for testable in (set(submission.project.testables) - pending):
if submission_admin or not testable.is_hidden:
ts = TestableStatus(testable, by_testable.get(testable),
submission.verification_results.errors)
if ts.issue:
testable_issues.append(ts)
else:
extra_files = files = pending = warnings = None
testable_issues = []
if submission.testables_succeeded():
# Decode utf-8 and ignore errors until the data is diffed in unicode.
diff_table = diff_renderer.make_whole_file().decode('utf-8', 'ignore')
else:
diff_table = None
# Do this after we've potentially updated the session
prev_sub, next_sub = prev_next_submission(submission)
if submission_admin:
prev_group, next_group = prev_next_group(submission.project,
submission.group)
else:
prev_group = next_group = None
return {'diff_table': diff_table,
'extra_files': extra_files,
'files': files,
'next_sub': next_sub,
'next_group': next_group,
'pending': pending,
'prev_sub': prev_sub,
'prev_group': prev_group,
'submission': submission,
'submission_admin': submission_admin,
'testable_issues': testable_issues,
'warnings': warnings}
@view_config(route_name='test_case', request_method='PUT',
permission='authenticated', renderer='json')
@validate(name=String('name', min_length=1),
args=String('args', min_length=1),
expected=ViewableDBThing('expected_id', File, optional=True),
hide_expected=TextNumber('hide_expected', min_value=0, max_value=1,
optional=True),
output_filename=String('output_filename', min_length=1,
optional=True),
output_source=OUTPUT_SOURCE, output_type=OUTPUT_TYPE,
points=TextNumber('points'),
stdin=ViewableDBThing('stdin_id', File, optional=True),
testable=EditableDBThing('testable_id', Testable))
@test_case_verification
def test_case_create(request, name, args, expected, hide_expected,
output_filename, output_source, output_type, points,
stdin, testable):
test_case = TestCase(name=name, args=args, expected=expected,
hide_expected=bool(hide_expected),
output_filename=output_filename,
output_type=output_type, points=points,
source=output_source, stdin=stdin, testable=testable)
Session.add(test_case)
try:
Session.flush()
except IntegrityError:
raise HTTPConflict('That name already exists for the testable')
redir_location = request.route_path('project_edit',
project_id=testable.project.id)
return http_created(request, redir_location=redir_location)
@view_config(route_name='test_case_item', request_method='DELETE',
permission='authenticated', renderer='json')
@validate(test_case=EditableDBThing('test_case_id', TestCase,
source=MATCHDICT))
def test_case_delete(request, test_case):
redir_location = request.route_path(
'project_edit', project_id=test_case.testable.project.id)
request.session.flash('Deleted TestCase {0}.'.format(test_case.name),
'successes')
testable = test_case.testable
Session.delete(test_case)
# Update the testable point score
testable.update_points()
return http_ok(request, redir_location=redir_location)
@view_config(route_name='test_case_item', request_method='POST',
permission='authenticated', renderer='json')
@validate(name=String('name', min_length=1),
args=String('args', min_length=1),
expected=ViewableDBThing('expected_id', File, optional=True),
hide_expected=TextNumber('hide_expected', min_value=0, max_value=1,
optional=True),
output_filename=String('output_filename', min_length=1,
optional=True),
output_source=OUTPUT_SOURCE, output_type=OUTPUT_TYPE,
points=TextNumber('points'),
stdin=ViewableDBThing('stdin_id', File, optional=True),
test_case=EditableDBThing('test_case_id', TestCase,
source=MATCHDICT))
@test_case_verification
def test_case_update(request, name, args, expected, hide_expected,
output_filename, output_source, output_type, points,
stdin, test_case):
if not test_case.update(name=name, args=args, expected=expected,
hide_expected=bool(hide_expected),
output_filename=output_filename,
output_type=output_type, points=points,
source=output_source, stdin=stdin):
return http_ok(request, message='Nothing to change')
try:
Session.flush()
except IntegrityError:
raise HTTPConflict('That name already exists for the testable')
# Update the testable point score
test_case.testable.update_points()
request.session.flash('Updated TestCase {0}.'.format(test_case.name),
'successes')
redir_location = request.route_path(
'project_edit', project_id=test_case.testable.project.id)
return http_ok(request, redir_location=redir_location)
@view_config(route_name='testable', request_method='PUT',
permission='authenticated', renderer='json')
@validate(name=String('name', min_length=1),
is_hidden=TextNumber('is_hidden', min_value=0, max_value=1,
optional=True),
make_target=String('make_target', min_length=1, optional=True),
executable=String('executable', min_length=1),
build_file_ids=List('build_file_ids', TextNumber('', min_value=0),
optional=True),
execution_file_ids=List('execution_file_ids',
TextNumber('', min_value=0), optional=True),
file_verifier_ids=List('file_verifier_ids',
TextNumber('', min_value=0), optional=True),
project=EditableDBThing('project_id', Project))
def testable_create(request, name, is_hidden, make_target, executable,
build_file_ids, execution_file_ids, file_verifier_ids,
project):
if make_target and not project.makefile:
msg = 'make_target cannot be specified without a make file'
raise HTTPBadRequest(msg)
try:
# Verify the ids actually exist and are associated with the project
build_files = fetch_request_ids(build_file_ids, BuildFile,
'build_file_id',
project.build_files)
execution_files = fetch_request_ids(execution_file_ids, ExecutionFile,
'execution_file_id')
file_verifiers = fetch_request_ids(file_verifier_ids, FileVerifier,
'file_verifier_id',
project.file_verifiers)
except InvalidId as exc:
raise HTTPBadRequest('Invalid {0}'.format(exc.message))
testable = Testable(name=name, is_hidden=bool(is_hidden),
make_target=make_target,
executable=executable, project=project,
build_files=build_files,
execution_files=execution_files,
file_verifiers=file_verifiers)
redir_location = request.route_path('project_edit', project_id=project.id)
Session.add(testable)
try:
Session.flush()
except IntegrityError:
raise HTTPConflict('That name already exists for the project')
return http_created(request, redir_location=redir_location,
testable_id=testable.id)
@view_config(route_name='testable_item', request_method='POST',
permission='authenticated', renderer='json')
@validate(name=String('name', min_length=1),
is_hidden=TextNumber('is_hidden', min_value=0, max_value=1,
optional=True),
make_target=String('make_target', min_length=1, optional=True),
executable=String('executable', min_length=1),
build_file_ids=List('build_file_ids', TextNumber('', min_value=0),
optional=True),
execution_file_ids=List('execution_file_ids',
TextNumber('', min_value=0), optional=True),
file_verifier_ids=List('file_verifier_ids',
TextNumber('', min_value=0), optional=True),
testable=EditableDBThing('testable_id', Testable, source=MATCHDICT))
def testable_edit(request, name, is_hidden, make_target, executable,
build_file_ids, execution_file_ids, file_verifier_ids,
testable):
if make_target and not testable.project.makefile:
msg = 'make_target cannot be specified without a make file'
raise HTTPBadRequest(msg)
try:
# Verify the ids actually exist and are associated with the project
build_files = fetch_request_ids(build_file_ids, BuildFile,
'build_file_id',
testable.project.build_files)
execution_files = fetch_request_ids(execution_file_ids, ExecutionFile,
'execution_file_id')
file_verifiers = fetch_request_ids(file_verifier_ids, FileVerifier,
'file_verifier_id',
testable.project.file_verifiers)
except InvalidId as exc:
raise HTTPBadRequest('Invalid {0}'.format(exc.message))
Session.autoflush = False # Don't flush while testing for changes
if not testable.update(_ignore_order=True, is_hidden=bool(is_hidden),
name=name, make_target=make_target,
executable=executable,
build_files=build_files,
execution_files=execution_files,
file_verifiers=file_verifiers):
return http_ok(request, message='Nothing to change')
try:
Session.flush()
except IntegrityError:
raise HTTPConflict('That name already exists for the project')
request.session.flash('Updated Testable {0}.'.format(testable.name),
'successes')
redir_location = request.route_path('project_edit',
project_id=testable.project.id)
return http_ok(request, redir_location=redir_location)
@view_config(route_name='testable_item', request_method='DELETE',
permission='authenticated', renderer='json')
@validate(testable=EditableDBThing('testable_id', Testable, source=MATCHDICT))
def testable_delete(request, testable):
redir_location = request.route_path('project_edit',
project_id=testable.project.id)
request.session.flash('Deleted Testable {0}.'.format(testable.name),
'successes')
Session.delete(testable)
return http_ok(request, redir_location=redir_location)
@view_config(route_name='user', request_method='PUT', renderer='json')
@validate(identity=UmailAddress('email', min_length=16, max_length=64),
verification=String('verification'))
def user_create(request, identity, verification):
username, name = identity
return add_user(request, name, username, verification)
@view_config(route_name='user', request_method='ADMINPUT', renderer='json',
permission='admin')
@validate(name=String('name', min_length=5),
username=String('email', min_length=6, max_length=64),
verification=String('verification'))
def user_create_special(request, name, username, verification):
return add_user(request, name, username, verification,
request.route_path('user_new_special'))
@view_config(route_name='user_join', request_method='GET',
permission='authenticated',
renderer='templates/forms/class_join_list.pt')
def user_join(request):
# get all the classes that the given user is not in, and let the
# user optionally join them
all_classes = frozenset(Class.query_by(is_locked=False).all())
user_classes = frozenset(request.user.classes)
return {'classes': sorted(all_classes - user_classes)}
@view_config(route_name='user_new', request_method='GET',
renderer='templates/forms/user_create.pt')
def user_edit(request):
return {}
@view_config(route_name='user_new_special', request_method='GET',
renderer='templates/forms/user_create_special.pt',
permission='admin')
def user_edit_special(request):
return {}
@view_config(route_name='user_item', request_method='GET',
renderer='templates/user_view.pt', permission='authenticated')
@validate(user=ViewableDBThing('username', User, fetch_by='username',
validator=String('username'), source=MATCHDICT))
def user_view(request, user):
user_groups = [x.group_id for x in Session.query(UserToGroup)
.filter(UserToGroup.user == user).all()]
admin_subs = user_subs = None
if user_groups:
user_subs = (Submission.query_by()
.filter(Submission.group_id.in_(user_groups))
.order_by(Submission.created_at.desc()).limit(10).all())
admin_classes = user.classes_can_admin()
if admin_classes:
class_ids = [x.id for x in admin_classes]
class_projs = [x.id for x in Project.query_by()
.filter(Project.class_id.in_(class_ids))
.all()]
if class_projs:
admin_subs = (Submission.query_by()
.filter(Submission.project_id.in_(class_projs))
.order_by(Submission.created_at.desc()).limit(10)
.all())
return {'name': user.name,
'user_subs': user_subs,
'classes_taking': sorted(user.classes),
'admin_subs': admin_subs,
'admin_classes': admin_classes}
@view_config(route_name='zipfile_download', request_method='GET',
permission='authenticated')
@validate(submission=ViewableDBThing('submission_id', Submission,
source=MATCHDICT))
def zipfile_download(request, submission):
def file_path(file_):
return File.file_path(request.registry.settings['file_directory'],
file_.sha1)
users = submission.group.users_str.replace(' ', '_').replace(',', '-')
base_path = '{0}_{1}'.format(users, submission.id)
# include makefile and student submitted files
files = [(os.path.join(base_path, 'Makefile'),
file_path(submission.project.makefile))]
for filename, file_ in submission.file_mapping().items():
files.append((os.path.join(base_path, filename), file_path(file_)))
return zip_response(request, base_path + '.zip', files)
|
1626851
|
import os
from aioresponses import aioresponses
from aiounittest import AsyncTestCase, futurized
from asyncopenstackclient import AuthPassword
from unittest.mock import patch
class TestAuth(AsyncTestCase):
def setUp(self):
self.auth_args = ('http://url', 'm_user', 'm_pass', 'm_project',
'm_user_domain', 'm_project_domain')
self.auth = AuthPassword(*self.auth_args)
def tearDown(self):
patch.stopall()
for name in list(os.environ.keys()):
if name.startswith('OS_'):
del os.environ[name]
async def test_create_object(self):
expected_payload = {'auth': {
'identity': {'methods': ['password'], 'password': {'user': {
'domain': {'name': 'm_user_domain'},
'name': 'm_user', 'password': '<PASSWORD>'
}}},
'scope': {'project': {'domain': {'name': 'm_project_domain'}, 'name': 'm_project'}}
}}
self.assertEqual(self.auth._auth_payload, expected_payload)
self.assertEqual(self.auth._auth_endpoint, 'http://url/auth/tokens')
self.assertTrue('Content-Type' in self.auth.headers)
async def test_create_object_use_environ(self):
expected_payload = {'auth': {
'identity': {'methods': ['password'], 'password': {'user': {'domain': {'name': 'udm'}, 'name': 'uuu', 'password': '<PASSWORD>'}}},
'scope': {'project': {'domain': {'name': 'udm'}, 'name': 'prj'}}
}}
env = {
'OS_AUTH_URL': 'https://keystone/v3',
'OS_PASSWORD': '<PASSWORD>', 'OS_USERNAME': 'uuu',
'OS_USER_DOMAIN_NAME': 'udm', 'OS_PROJECT_NAME': 'prj'
}
with patch.dict('os.environ', env, clear=True):
auth = AuthPassword()
self.assertEqual(auth._auth_payload, expected_payload)
self.assertEqual(auth._auth_endpoint, 'https://keystone/v3/auth/tokens')
self.assertTrue('Content-Type' in auth.headers)
async def test_get_token(self):
body = {
"token": {
"catalog": {
"endpoints": [
{"name": "mock_endpoint", "endpoints": [{"url": "mock_url", "interface": "public"}]}
]
},
"expires_at": "1970-01-01T01:00:00.000000Z"
}
}
headers = {
"Vary": "X-Auth-Token",
"x-openstack-request-id": "1234",
"Content-Type": "application/json",
"X-Subject-Token": "<PASSWORD>"
}
with aioresponses() as req:
req.post('http://url/auth/tokens', payload=body, headers=headers)
res = await self.auth.get_token()
self.assertEqual(res, (headers["X-Subject-Token"], 3600, body["token"]["catalog"]))
def test_get_endpoint_url_existing_endpoint(self):
self.auth.endpoints = [
{"name": "mock_endpoint", "endpoints": [{"url": "mock_url", "interface": "public"}]}
]
endpoint_url = self.auth.get_endpoint_url("mock_endpoint")
self.assertEqual(endpoint_url, "mock_url")
def test_get_endpoint_url_bad_endpoint_name(self):
self.auth.endpoints = [
{"name": "mock_endpoint", "endpoints": [{"url": "mock_url", "interface": "public"}]}
]
with self.assertRaises(ValueError):
self.auth.get_endpoint_url("none_existing_endpoint")
def test_get_endpoint_url_bad_interface(self):
self.auth.endpoints = [
{"name": "mock_endpoint", "endpoints": [{"url": "mock_url", "interface": "public"}]}
]
with self.assertRaises(ValueError):
self.auth.get_endpoint_url("mock_endpoint", prefered_interface="not_existing_interface")
async def test_authenticate_first_time(self):
mock_get_token_results = [
futurized(('mock-token1', 1000, 'whatever')),
futurized(('mock-token2', 1000, 'whatever')),
]
        # time will be called 2 times because of Python's lazy evaluation
mock_time_results = [
900,
1100
]
patch('asyncopenstackclient.auth.AuthPassword.get_token', side_effect=mock_get_token_results).start()
patch('asyncopenstackclient.auth.time', side_effect=mock_time_results).start()
# first time token should be None and get_token shall be called
await self.auth.authenticate()
        self.assertEqual(self.auth.token, 'mock-token1')
# second time, token is not None and current time is before token expiration, no change
await self.auth.authenticate()
self.assertEqual(self.auth.token, 'mock-token1')
# third time, token expires and should be renewed
await self.auth.authenticate()
self.assertEqual(self.auth.token, 'mock-token2')
|
1626859
|
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import utils.distance as distance
import utils.inertia as inertia
from swarm import Swarm, Particle
import functions
|
1626863
|
from zlib import decompress
def get_row(db, name):
query = "SELECT rowid, sz FROM sqlar WHERE name=:name"
return db.cursor().execute(query, {"name": name}).fetchone()
def get_blob(db, row):
return db.blobopen("main", "sqlar", "data", row["rowid"], False)
def get_data(db, row):
blob = get_blob(db, row)
length = blob.length()
data = blob.read()
return decompress(data) if row["sz"] != length else data
|
1626900
|
from flask import url_for
from arrested import ArrestedAPI, Resource, Endpoint
def initialise_app_via_constructor(app):
"""Test instantiating ArrestedAPI obj passing flask app object directly
"""
api_v1 = ArrestedAPI(app)
assert api_v1.app == app
def defer_app_initialisation(app):
"""Test deferring initialising the flask app object using init_app method.
"""
api_v1 = ArrestedAPI()
api_v1.init_app(app)
assert api_v1.app == app
def test_register_resource(app):
"""Test that Resources are properly reigstered as a blueprint when
ArrestedAPI.register_resource is called.
"""
api_v1 = ArrestedAPI(app)
example_resource = Resource('example', __name__, url_prefix='/example')
api_v1.register_resource(example_resource)
assert app.blueprints == {'example': example_resource}
def test_register_all(app):
"""Test that Resources are properly reigstered as a blueprint when
ArrestedAPI.register_resource is called.
"""
api_v1 = ArrestedAPI(app)
example_resource = Resource('example', __name__, url_prefix='/example')
example_resource_2 = Resource('example_2', __name__, url_prefix='/example-2')
api_v1.register_all([example_resource, example_resource_2])
assert app.blueprints == {
'example': example_resource,
'example_2': example_resource_2
}
def test_defer_resource_registration(app):
"""Test that Resources are properly reigstered as a blueprint when
ArrestedAPI.register_resource is called.
"""
api_v1 = ArrestedAPI()
example_resource = Resource('example', __name__, url_prefix='/example')
example_resource_2 = Resource('example_2', __name__, url_prefix='/example-2')
api_v1.register_resource(example_resource, defer=True)
api_v1.register_resource(example_resource_2, defer=True)
assert app.blueprints == {}
api_v1.init_app(app)
assert app.blueprints == {
'example': example_resource,
'example_2': example_resource_2
}
def test_register_resource_with_url_prefix(app):
"""Test that the url_prefix is correctly applied to all resources when provided
"""
api_v1 = ArrestedAPI(app, url_prefix='/v1')
example_resource = Resource('example', __name__, url_prefix='/example')
class MyEndpoint(Endpoint):
name = 'test'
example_resource.add_endpoint(MyEndpoint)
api_v1.register_resource(example_resource)
assert url_for('example.test') == '/v1/example'
def test_api_request_middleware(app, client):
evts = []
    def api_before_func(*args, **kwargs):
evts.append('api_before')
return None
def api_after_func(endpoint, response):
response.data += b'|api_after'
evts.append('api_after')
return response
def resource_before_func(endpoint):
evts.append('resource_before')
return None
def resource_after_func(endpoint, response):
response.data += b'|resource_after'
evts.append('resource_after')
return response
api_v1 = ArrestedAPI(
app,
url_prefix='/v1',
before_all_hooks=[api_before_func],
after_all_hooks=[api_after_func]
)
example_resource = Resource(
'example', __name__,
url_prefix='/example',
before_all_hooks=[resource_before_func],
after_all_hooks=[resource_after_func]
)
class MyEndpoint(Endpoint):
name = 'test'
def get(self, *args, **kwargs):
assert 'api_before' in evts
assert 'api_after' not in evts
assert 'resource_before' in evts
assert 'resource_after' not in evts
return 'request'
example_resource.add_endpoint(MyEndpoint)
api_v1.register_resource(
example_resource,
)
resp = client.get(url_for('example.test'))
assert resp.data == b'request|resource_after|api_after'
assert evts == ['api_before', 'resource_before', 'resource_after', 'api_after']
def test_api_request_middleware_limited_to_api(app, client):
evts = []
    def api_before_func(*args, **kwargs):
evts.append('api_before')
return None
def api_after_func(endpoint, response):
response.data += b'|api_after'
evts.append('api_after')
return response
def resource_before_func(endpoint):
evts.append('resource_before')
return None
def resource_after_func(endpoint, response):
response.data += b'|resource_after'
evts.append('resource_after')
return response
api_v1 = ArrestedAPI(
app,
url_prefix='/v1',
before_all_hooks=[api_before_func],
after_all_hooks=[api_after_func]
)
api_v2 = ArrestedAPI(
app,
url_prefix='/v2',
)
example_resource = Resource(
'example', __name__,
url_prefix='/example',
before_all_hooks=[resource_before_func],
after_all_hooks=[resource_after_func]
)
example2_resource = Resource(
'example2', __name__,
url_prefix='/example2'
)
class MyEndpoint(Endpoint):
name = 'test'
def get(self, *args, **kwargs):
assert 'api_before' not in evts
assert 'api_after' not in evts
assert 'resource_before' not in evts
assert 'resource_after' not in evts
return 'request'
example_resource.add_endpoint(MyEndpoint)
example2_resource.add_endpoint(MyEndpoint)
api_v1.register_resource(
example_resource,
)
api_v2.register_resource(
example2_resource,
)
resp = client.get(url_for('example2.test'))
assert resp.data == b'request'
|
1626909
|
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
xs = mnist.test.images
ys = mnist.test.labels
np.save('orig_images.npy', xs)
np.save('orig_labels.npy', ys)
|
1627007
|
import pyjd
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.DockPanel import DockPanel
from pyjamas.ui.HTML import HTML
from pyjamas.JSONService import JSONProxy
from pyjamas import DOM
from pyjamas import Factory
from pyjamas import History
from pyjamas import Window
from pyjamas import logging
log = logging.getAppendLogger(__name__, logging.DEBUG, logging.PLAIN_FORMAT)
from WebPageEdit import WebPageEdit
from pyjamas.ui.HTMLLinkPanel import HTMLLinkPanel
import Email # don't do anything with it - just import it and use factory
class WebApp:
def onModuleLoad(self):
self.remote = DataService()
#Show the initial screen.
initToken = History.getToken()
if initToken and len(initToken):
if initToken == 'admin':
RootPanel().add(WebPageEdit(self))
return
else:
initToken = 'index'
self.dock = DockPanel()
self.dock.setWidth("100%")
self.pages = {}
self.current_page = None
RootPanel().add(self.dock)
History.addHistoryListener(self)
self.onHistoryChanged(initToken)
def createPage(self, ref, html, title):
htp = HTMLLinkPanel(html)
htp.replaceLinks()
htp.setWidth("100%")
self.pages[ref] = htp
def onHistoryChanged(self, token):
#log.debug("onHistoryChanged %s" % token)
if self.pages.has_key(token):
self.setPage(token)
return
self.remote.getPageByName(token, self)
def setPage(self, ref):
htp = self.pages[ref]
if htp == self.current_page:
return
Window.setTitle(htp.title)
if self.current_page:
self.dock.remove(self.current_page)
self.dock.add(htp, DockPanel.CENTER)
self.current_page = htp
def onRemoteResponse(self, response, request_info):
if (request_info.method == 'getPageByName' or
request_info.method == 'getPage'):
item = response[0]
html = item['fields']['text']
token = item['fields']['name']
self.createPage(token, html, token)
self.setPage(token)
def onRemoteError(self, code, message, request_info):
RootPanel().add(HTML("Server Error or Invalid Response: ERROR " + str(code) + " - " + str(message)))
class DataService(JSONProxy):
def __init__(self):
JSONProxy.__init__(self, "/services/pages/",
["getPage", "updatePage",
"getPages", "addPage",
"getPageByName",
"deletePage"])
if __name__ == "__main__":
pyjd.setup("http://127.0.0.8/site_media/public/index.html")
el = DOM.createElement("div")
el.innerHTML = "test"
DOM.setAttribute(el, "id", "pyjamas.apps.Email(Text='hello')")
label = Factory.createWidgetOnElement(el)
RootPanel().add(label)
app = WebApp()
app.onModuleLoad()
pyjd.run()
|
1627039
|
from arepl_pickler import specialVars, pickle_user_vars, pickle_user_error
import arepl_python_evaluator as python_evaluator
import arepl_jsonpickle as jsonpickle
def test_special_floats():
x = float("infinity")
y = float("nan")
z = float("-infinity")
vars = jsonpickle.decode(pickle_user_vars(locals()))
assert vars["x"] == "Infinity"
assert vars["y"] == "NaN"
assert vars["z"] == "-Infinity"
def test_default_type_filter():
def foo():
return 3
cat = 2
vars = jsonpickle.decode(pickle_user_vars(locals()))
assert vars["cat"] == 2
assert "foo" not in vars
def test_custom_filter():
arepl_filter = ["dog"]
dog = 1
cat = 2
vars = jsonpickle.decode(pickle_user_vars(locals()))
assert vars["cat"] == 2
assert "dog" not in vars
assert "arepl_filter" not in vars
def test_custom_type_filter():
arepl_filter_type = ["<class 'str'>"]
dog = ""
cat = 2
vars = jsonpickle.decode(pickle_user_vars(locals()))
assert vars["cat"] == 2
assert "dog" not in vars
assert "arepl_filter_type" not in vars
def test_custom_filter_function():
def arepl_filter_function(userVariables):
userVariables["a"] = 3
return userVariables
vars = jsonpickle.decode(pickle_user_vars(locals()))
assert vars["a"] == 3
assert "arepl_filter_function" not in vars
def test_jsonpickle_err_doesnt_break_arepl():
class foo:
def __getstate__(self):
a
f = foo()
assert jsonpickle.decode(pickle_user_vars(locals()))["f"] == "AREPL could not pickle this object"
# I don't want to require pandas to run tests
# So leaving this commented, devs can uncomment to run test if they want to
# def test_jsonpickle_err_doesnt_break_arepl_2():
# import pandas as pd
# lets = ['A', 'B', 'C']
# nums = ['1', '2', '3']
# midx = pd.MultiIndex.from_product([lets, nums])
# units = pd.Series(0, index=midx)
# assert jsonpickle.decode(pickle_user_vars(locals()))["units"] == "AREPL could not pickle this object"
def test_error_has_extended_traceback_1():
try:
python_evaluator.exec_input(
python_evaluator.ExecArgs(
"""
try:
x
except NameError as e:
x=1/0
"""
)
)
except (KeyboardInterrupt, SystemExit):
raise
except python_evaluator.UserError as e:
json = pickle_user_error(e.traceback_exception)
assert "ZeroDivisionError" in json
assert "NameError" in json
def test_error_has_extended_traceback_2():
try:
python_evaluator.exec_input(
python_evaluator.ExecArgs(
"""
def foo():
raise ZeroDivisionError()
try:
foo()
except Exception as e:
fah
"""
)
)
except (KeyboardInterrupt, SystemExit):
raise
except python_evaluator.UserError as e:
json = pickle_user_error(e.traceback_exception)
assert "NameError" in json
assert "ZeroDivisionError" in json
|
1627042
|
import logging
from argparse import Namespace
from re import RegexFlag, fullmatch
from typing import Optional, Callable, Awaitable
from aiohttp.hdrs import METH_OPTIONS
from aiohttp.web import HTTPRedirection, HTTPNotFound, HTTPBadRequest, HTTPException, HTTPNoContent
from aiohttp.web_exceptions import HTTPServiceUnavailable
from aiohttp.web_middlewares import middleware
from aiohttp.web_request import Request
from aiohttp.web_response import StreamResponse
from core import version
from core.analytics import AnalyticsEventSender, CoreEvent
from core.error import NotFoundError, ClientError
from core.metrics import RequestInProgress, RequestLatency, RequestCount, perf_now
from core.web import RequestHandler, api # pylint: disable=unused-import # prevent circular import
log = logging.getLogger(__name__)
def enable_compression(request: Request, response: StreamResponse) -> None:
# The UI can not handle compressed responses. Allow compression only if requested by somebody else
if "resotoui-via" not in request.headers:
response.enable_compression()
async def on_response_prepare(request: Request, response: StreamResponse) -> None:
# Headers are required for the UI to work, since it uses SharedArrayBuffer.
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/SharedArrayBuffer
if fullmatch("/ui/.*", request.path, RegexFlag.IGNORECASE):
response.headers["Cross-Origin-Opener-Policy"] = "same-origin"
response.headers["Cross-Origin-Embedder-Policy"] = "require-corp"
# In case of a CORS request: a response header to allow the origin is required
if request.headers.get("sec-fetch-mode") == "cors":
response.headers["Access-Control-Allow-Origin"] = request.headers.get("origin", "*")
@middleware
async def cors_handler(request: Request, handler: RequestHandler) -> StreamResponse:
if request.method == METH_OPTIONS:
return HTTPNoContent(
headers={
# allow origin of request or all if none is defined.
"Access-Control-Allow-Origin": request.headers.get("origin", "*"),
# allow the requested method or all if none is defined.
"Access-Control-Allow-Methods": request.headers.get("access-control-request-method", "*"),
# allow the requested header names or all if none is defined.
"Access-Control-Allow-Headers": request.headers.get("access-control-request-headers", "*"),
# allow the client to cache this result
"Access-Control-Max-Age": "86400", # allow caching for one day
}
)
else:
return await handler(request)
@middleware
async def metrics_handler(request: Request, handler: RequestHandler) -> StreamResponse:
request["start_time"] = perf_now()
RequestInProgress.labels(request.path, request.method).inc()
try:
response = await handler(request)
RequestCount.labels(request.method, request.path, response.status).inc()
return response
except HTTPException as ex:
RequestCount.labels(request.method, request.path, ex.status).inc()
raise ex
finally:
resp_time = perf_now() - request["start_time"]
RequestLatency.labels(request.path).observe(resp_time)
RequestInProgress.labels(request.path, request.method).dec()
def error_handler(
args: Namespace, event_sender: AnalyticsEventSender
) -> Callable[[Request, RequestHandler], Awaitable[StreamResponse]]:
is_debug = (logging.root.level < logging.INFO) or args.debug
def exc_info(ex: Exception) -> Optional[Exception]:
return ex if is_debug else None
@middleware
async def error_handler_middleware(request: Request, handler: RequestHandler) -> StreamResponse:
try:
return await handler(request)
except HTTPRedirection as e:
# redirects are implemented as exceptions in aiohttp for whatever reason...
raise e
except NotFoundError as e:
kind = type(e).__name__
message = f"Error: {kind}\nMessage: {str(e)}"
log.info(f"Request {request} has failed with exception: {message}", exc_info=exc_info(e))
raise HTTPNotFound(text=message) from e
except (ClientError, AttributeError) as e:
kind = type(e).__name__
ex_str = str(e)
message = f"Error: {kind}\nMessage: {ex_str}"
log.info(f"Request {request} has failed with exception: {message}", exc_info=exc_info(e))
await event_sender.core_event(
CoreEvent.ClientError, {"version": version(), "kind": kind, "message": ex_str}
)
raise HTTPBadRequest(text=message) from e
except Exception as e:
kind = type(e).__name__
ex_str = str(e)
message = f"Error: {kind}\nMessage: {ex_str}"
log.warning(f"Request {request} has failed with exception: {message}", exc_info=exc_info(e))
await event_sender.core_event(
CoreEvent.ServerError, {"version": version(), "kind": kind, "message": ex_str}
)
raise HTTPBadRequest(text=message) from e
return error_handler_middleware
def default_middleware(api_handler: "api.Api") -> Callable[[Request, RequestHandler], Awaitable[StreamResponse]]:
@middleware
async def default_handler(request: Request, handler: RequestHandler) -> StreamResponse:
if api_handler.in_shutdown:
# We are currently in shutdown: inform the caller to retry later.
return HTTPServiceUnavailable(headers={"Retry-After": "5"})
else:
return await handler(request)
return default_handler
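# Wiring sketch (illustrative, not part of this module; assumes
# "from aiohttp import web"): the middlewares above are typically attached when
# the application is created, and on_response_prepare is registered as a signal:
#
#   app = web.Application(middlewares=[
#       metrics_handler,
#       cors_handler,
#       error_handler(args, event_sender),
#       default_middleware(api_handler),
#   ])
#   app.on_response_prepare.append(on_response_prepare)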
|
1627053
|
import FWCore.ParameterSet.Config as cms
fftjetVertexAdder = cms.EDProducer(
"FFTJetVertexAdder",
#
# Label for the beam spot info
beamSpotLabel = cms.InputTag("offlineBeamSpot"),
#
# Label for an existing collection of primary vertices
existingVerticesLabel = cms.InputTag("offlinePrimaryVertices"),
#
# Label for the output collection
outputLabel = cms.string("FFTJetFudgedVertices"),
#
# Do we want to use the beam spot info from the event data
    # in order to generate the vertices?
useBeamSpot = cms.bool(True),
#
    # Do we want to add an existing collection (labeled by "existingVerticesLabel"
# defined above) to the fake vertices?
addExistingVertices = cms.bool(False),
#
# If we are not using the beam spot, what would be the average
# position of the generated vertices?
fixedX = cms.double(0.0),
fixedY = cms.double(0.0),
fixedZ = cms.double(0.0),
#
# If we are not using the beam spot, what would be the vertex spread?
sigmaX = cms.double(0.0014),
sigmaY = cms.double(0.0014),
sigmaZ = cms.double(6.0),
#
# Parameters of the vertex to generate (these are not varied)
nDof = cms.double(10.0),
chi2 = cms.double(10.0),
errX = cms.double(0.001),
errY = cms.double(0.001),
errZ = cms.double(0.01),
#
# How many fake vertices should we make?
nVerticesToMake = cms.uint32(1)
)
|
1627063
|
import argparse
import logging
import ipdb
import os
import sys
import torch
import random
import importlib
import yaml
from box import Box
from pathlib import Path
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))
import src
def main(args):
logging.info(f'Load the config from "{args.config_path}".')
config = Box.from_yaml(filename=args.config_path)
saved_dir = Path(config.main.saved_dir)
if not saved_dir.is_dir():
saved_dir.mkdir(parents=True)
logging.info(f'Save the config to "{config.main.saved_dir}".')
with open(saved_dir / 'config.yaml', 'w+') as f:
yaml.dump(config.to_dict(), f, default_flow_style=False)
if not args.test:
# Make the experiment results deterministic.
random.seed(config.main.random_seed)
torch.manual_seed(random.getstate()[1][1])
torch.cuda.manual_seed_all(random.getstate()[1][1])
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
logging.info('Create the device.')
if 'cuda' in config.trainer.kwargs.device and not torch.cuda.is_available():
raise ValueError("The cuda is not available. Please set the device in the trainer section to 'cpu'.")
device = torch.device(config.trainer.kwargs.device)
logging.info('Create the training and validation datasets.')
data_dir = Path(config.dataset.kwargs.data_dir)
config.dataset.kwargs.update(data_dir=data_dir, type='train')
train_dataset = _get_instance(src.data.datasets, config.dataset)
config.dataset.kwargs.update(data_dir=data_dir, type='valid')
valid_dataset = _get_instance(src.data.datasets, config.dataset)
logging.info('Create the training and validation dataloaders.')
cls = getattr(src.data.datasets, config.dataset.name)
train_batch_size, valid_batch_size = config.dataloader.kwargs.pop('train_batch_size'), config.dataloader.kwargs.pop('valid_batch_size')
config.dataloader.kwargs.update(collate_fn=getattr(cls, 'collate_fn', None), batch_size=train_batch_size)
train_dataloader = _get_instance(src.data.dataloader, config.dataloader, train_dataset)
config.dataloader.kwargs.update(batch_size=valid_batch_size)
valid_dataloader = _get_instance(src.data.dataloader, config.dataloader, valid_dataset)
logging.info('Create the network architecture.')
net = _get_instance(src.model.nets, config.net)
logging.info('Create the loss functions and the corresponding weights.')
loss_fns, loss_weights = [], []
defaulted_loss_fns = [loss_fn for loss_fn in dir(torch.nn) if 'Loss' in loss_fn]
for config_loss in config.losses:
if config_loss.name in defaulted_loss_fns:
loss_fn = _get_instance(torch.nn, config_loss)
else:
loss_fn = _get_instance(src.model.losses, config_loss)
loss_fns.append(loss_fn)
loss_weights.append(config_loss.weight)
logging.info('Create the metric functions.')
metric_fns = [_get_instance(src.model.metrics, config_metric) for config_metric in config.metrics]
logging.info('Create the optimizer.')
optimizer = _get_instance(torch.optim, config.optimizer, net.parameters())
logging.info('Create the learning rate scheduler.')
lr_scheduler = _get_instance(torch.optim.lr_scheduler, config.lr_scheduler, optimizer) if config.get('lr_scheduler') else None
logging.info('Create the logger.')
config.logger.kwargs.update(log_dir=saved_dir / 'log', net=net, dummy_input=torch.randn(tuple(config.logger.kwargs.dummy_input)))
logger = _get_instance(src.callbacks.loggers, config.logger)
logging.info('Create the monitor.')
config.monitor.kwargs.update(checkpoints_dir=saved_dir / 'checkpoints')
monitor = _get_instance(src.callbacks.monitor, config.monitor)
logging.info('Create the trainer.')
kwargs = {'device': device,
'train_dataloader': train_dataloader,
'valid_dataloader': valid_dataloader,
'net': net,
'loss_fns': loss_fns,
'loss_weights': loss_weights,
'metric_fns': metric_fns,
'optimizer': optimizer,
'lr_scheduler': lr_scheduler,
'logger': logger,
'monitor': monitor}
config.trainer.kwargs.update(kwargs)
trainer = _get_instance(src.runner.trainers, config.trainer)
loaded_path = config.main.get('loaded_path')
if loaded_path:
logging.info(f'Load the previous checkpoint from "{loaded_path}".')
trainer.load(Path(loaded_path))
logging.info('Resume training.')
else:
logging.info('Start training.')
trainer.train()
logging.info('End training.')
else:
logging.info('Create the device.')
if 'cuda' in config.predictor.kwargs.device and not torch.cuda.is_available():
raise ValueError("The cuda is not available. Please set the device in the predictor section to 'cpu'.")
device = torch.device(config.predictor.kwargs.device)
logging.info('Create the testing dataset.')
data_dir = Path(config.dataset.kwargs.data_dir)
config.dataset.kwargs.update(data_dir=data_dir, type='test')
test_dataset = _get_instance(src.data.datasets, config.dataset)
logging.info('Create the testing dataloader.')
test_dataloader = _get_instance(src.data.dataloader, config.dataloader, test_dataset)
logging.info('Create the network architecture.')
net = _get_instance(src.model.nets, config.net)
logging.info('Create the loss functions and the corresponding weights.')
loss_fns, loss_weights = [], []
defaulted_loss_fns = [loss_fn for loss_fn in dir(torch.nn) if 'Loss' in loss_fn]
for config_loss in config.losses:
if config_loss.name in defaulted_loss_fns:
loss_fn = _get_instance(torch.nn, config_loss)
else:
loss_fn = _get_instance(src.model.losses, config_loss)
loss_fns.append(loss_fn)
loss_weights.append(config_loss.weight)
logging.info('Create the metric functions.')
metric_fns = [_get_instance(src.model.metrics, config_metric) for config_metric in config.metrics]
logging.info('Create the predictor.')
kwargs = {'device': device,
'test_dataloader': test_dataloader,
'net': net,
'loss_fns': loss_fns,
'loss_weights': loss_weights,
'metric_fns': metric_fns}
config.predictor.kwargs.update(kwargs)
predictor = _get_instance(src.runner.predictors, config.predictor)
if config.net.name != 'Bicubic':
logging.info(f'Load the previous checkpoint from "{config.main.loaded_path}".')
predictor.load(Path(config.main.loaded_path))
logging.info('Start testing.')
predictor.predict()
logging.info('End testing.')
def _parse_args():
parser = argparse.ArgumentParser(description="The script for the training and the testing.")
parser.add_argument('config_path', type=Path, help='The path of the config file.')
    parser.add_argument('--test', action='store_true', help='Perform the testing if specified; otherwise perform the training.')
args = parser.parse_args()
return args
def _get_instance(module, config, *args):
"""
Args:
module (module): The python module.
        config (Box): The config to create the class object.
        *args: Positional arguments passed to the class constructor.
Returns:
instance (object): The class object defined in the module.
"""
cls = getattr(module, config.name)
kwargs = config.get('kwargs')
return cls(*args, **config.kwargs) if kwargs else cls(*args)
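# Illustrative example (hypothetical config): given
#   config = Box({'name': 'Adam', 'kwargs': {'lr': 1e-3}})
# the call _get_instance(torch.optim, config, net.parameters()) resolves to
#   torch.optim.Adam(net.parameters(), lr=1e-3)
# while a config without a 'kwargs' key resolves to cls(*args) only.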
if __name__ == "__main__":
#with ipdb.launch_ipdb_on_exception():
# sys.breakpointhook = ipdb.set_trace
logging.basicConfig(format='%(asctime)s | %(levelname)s | %(message)s',
level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
args = _parse_args()
main(args)
|
1627100
|
import re
import numpy as np
from string import punctuation
# snowball stopwords from http://snowball.tartarus.org/algorithms/english/stop.txt
_STOPWORDS = {'a', 'about', 'above', 'after', 'again', 'against', 'all', 'am', 'an', 'and', 'any', 'are', "aren't", 'as',
'at', 'be', 'because', 'been', 'before', 'being', 'below', 'between', 'both', 'but', 'by', "can't",
'cannot', 'could', "couldn't", 'did', "didn't", 'do', 'does', "doesn't", 'doing', "don't", 'down',
'during', 'each', 'few', 'for', 'from', 'further', 'had', "hadn't", 'has', "hasn't", 'have', "haven't",
'having', 'he', "he'd", "he'll", "he's", 'her', 'here', "here's", 'hers', 'herself', 'him', 'himself',
'his', 'how', "how's", 'i', "i'd", "i'll", "i'm", "i've", 'if', 'in', 'into', 'is', "isn't", 'it', "it's",
'its', 'itself', "let's", 'me', 'more', 'most', "mustn't", 'my', 'myself', 'no', 'nor', 'not', 'of', 'off',
'on', 'once', 'only', 'or', 'other', 'ought', 'our', 'ours', 'ourselves', 'out', 'over', 'own', 'same',
"shan't", 'she', "she'd", "she'll", "she's", 'should', "shouldn't", 'so', 'some', 'such', 'than', 'that',
"that's", 'the', 'their', 'theirs', 'them', 'themselves', 'then', 'there', "there's", 'these', 'they',
"they'd", "they'll", "they're", "they've", 'this', 'those', 'through', 'to', 'too', 'under', 'until', 'up',
'very', 'was', "wasn't", 'we', "we'd", "we'll", "we're", "we've", 'were', "weren't", 'what', "what's",
'when', "when's", 'where', "where's", 'which', 'while', 'who', "who's", 'whom', 'why', "why's", 'with',
"won't", 'would', "wouldn't", 'you', "you'd", "you'll", "you're", "you've", 'your', 'yours', 'yourself',
'yourselves'}
_PATTERN_SUBJECT_APOSTROPHE = re.compile(r"\b(I|[Yy]ou|[Hh]e|[Ss]he|[Ii]t|[Ww]e|[Tt]hey)'\w+\b")
PATTERN_STRICT = re.compile(r"FW|(VB)\w?|NN\w?\w?")
PATTERN_LAX = re.compile(r"FW|(JJ|VB|RB)\w?|NN\w?\w?")
PATTERN_SUPER_LAX = re.compile(r".*")
_PRONOUNS_1ST_PERS_SG = {"I", "me", "my", "mine", "myself"}
_PRONOUNS_1ST_PERS_PL = {"we", "us", "our", "ours", "ourselves"}
_PRONOUNS_3RD_PERS_PL = {"they", "them", "their", "theirs", "themselves"}
PRONOUNS_3RD_PERS_SG_MALE = {"he", "him", "his", "himself"}
PRONOUNS_3RD_PERS_SG_FEMALE = {"she", "her", "hers", "herself"}
PRONOUNS_1ST_PERS = _PRONOUNS_1ST_PERS_SG | _PRONOUNS_1ST_PERS_PL
PRONOUNS_PL = _PRONOUNS_1ST_PERS_PL | _PRONOUNS_3RD_PERS_PL
PRONOUNS_SG = _PRONOUNS_1ST_PERS_SG | PRONOUNS_3RD_PERS_SG_MALE | PRONOUNS_3RD_PERS_SG_FEMALE
PRONOUNS = PRONOUNS_SG | PRONOUNS_PL
PRONOUNS_SUBJECT = {"I", "you", "he", "she", "we", "they"} #ignore "it"
def remove_punctuation(s):
return "".join(letter for letter in s if letter not in punctuation)
def get_sentence_from_conll(conll_document):
"""
Given the path to a conll format file, rebuilds a (close approximation?) of the original sentence by appending the
surface forms, separated by spaces.
    :param conll_document: the contents of a CONLL file, as a list of triples
"""
return " ".join(tup[0] for tup in conll_document)
def get_pronoun_lemmas(conll_document):
"""
Returns the list of lemmatized pronouns found in the given contents of a CONLL document (list of triples).
:param conll_document: the contents of a CONLL file, as a list of triples
:return:
"""
pronouns = []
for (form, lemma, pos) in conll_document:
lemma_no_punct = remove_punctuation(lemma)
if pos in {"PRP", "PRP$"} or lemma_no_punct in PRONOUNS:
pronouns.append(lemma_no_punct)
        elif _PATTERN_SUBJECT_APOSTROPHE.match(lemma):
            # identify and normalize occurrences of "I'd", "he's", "they've" and so on;
            # match on the raw lemma, since remove_punctuation strips the apostrophe
            pronoun = _PATTERN_SUBJECT_APOSTROPHE.sub(r"\1", lemma)
pronouns.append(pronoun)
return pronouns
def get_content_words(conll_document, pos_regex_pattern=PATTERN_STRICT):
"""
Returns the set of content words given the contents of a CONLL file.
:param conll_document: the contents of a CONLL file, as a list of triples
:param pos_regex_pattern: every POS tag matched by this regex pattern will be considered a content word
:return: the set of content words
"""
content_words = set()
for (form, lemma, pos) in conll_document:
if pos_regex_pattern.match(pos):
content_words.add(remove_punctuation(form))
return content_words - _STOPWORDS
def avg_embedding_for_doc(document, embedding_io, content_word_func=lambda doc: get_content_words(doc)):
"""
Returns the averaged embedding of the content words of a document.
In case the given content word extraction function returns an empty set of words for a document, every word in the
document is considered to be a content word as a fallback.
:param document: some CONLL document content (list of triplets)
:param embedding_io:
:param content_word_func:
:return:
"""
content_words = content_word_func(document)
if not content_words:
content_words = get_content_words(document, PATTERN_SUPER_LAX)
words_with_known_embeddings = [word for word in content_words if embedding_io.has_embedding(word)]
word_embeddings = [embedding_io.get_embedding(word) for word in words_with_known_embeddings]
embedding_vec = np.sum(word_embeddings, axis=0)
embedding_vec /= np.linalg.norm(embedding_vec)
return embedding_vec
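# Illustrative sketch (hypothetical data): a "conll_document" here is a list of
# (form, lemma, pos) triples, i.e. the rows of a CONLL file.
#
#   doc = [("She", "she", "PRP"), ("adopted", "adopt", "VBD"),
#          ("a", "a", "DT"), ("cat", "cat", "NN")]
#   get_sentence_from_conll(doc)  # -> "She adopted a cat"
#   get_pronoun_lemmas(doc)       # -> ["she"]
#   get_content_words(doc)        # -> {"adopted", "cat"}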
|
1627116
|
import rdtest
import renderdoc as rd
class VK_Buffer_Truncation(rdtest.Buffer_Truncation):
demos_test_name = 'VK_Buffer_Truncation'
internal = False
|
1627144
|
from abc import abstractmethod
from starfish.core.intensity_table.decoded_intensity_table import DecodedIntensityTable
from starfish.core.morphology.binary_mask import BinaryMaskCollection
from starfish.core.pipeline.algorithmbase import AlgorithmBase
class AssignTargetsAlgorithm(metaclass=AlgorithmBase):
"""
AssignTargets assigns cell IDs to detected spots using an IntensityTable and
SegmentationMaskCollection.
"""
@abstractmethod
def run(
self,
masks: BinaryMaskCollection,
decoded_intensity_table: DecodedIntensityTable,
verbose: bool = False,
in_place: bool = False,
) -> DecodedIntensityTable:
"""Performs target (e.g. gene) assignment given the spots and the regions."""
raise NotImplementedError()
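# Minimal subclass sketch (illustrative only, not a starfish-provided class):
# concrete assignment algorithms implement run() with the signature above.
#
#   class PassThroughAssign(AssignTargetsAlgorithm):
#       def run(self, masks, decoded_intensity_table, verbose=False, in_place=False):
#           # a real implementation would attach a cell id to each decoded spot
#           return decoded_intensity_table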
|
1627260
|
from typing import Dict, Optional
from io import BytesIO
from geventhttpclient import HTTPClient
from geventhttpclient.url import URL
from geventhttpclient.response import HTTPSocketPoolResponse
from thrift.transport.TTransport import TTransportBase
from line4py.config import LONG_POLLING_V4_PATH
class THttpClient(TTransportBase):
__custom_headers: Dict[str, str]
__wbuf: BytesIO
__response: Optional[HTTPSocketPoolResponse] = None
__looped: bool = False
__concurrency: int
__url: URL
__once_client: HTTPClient
client: HTTPClient
code: Optional[int] = None
message: Optional[str] = None
headers: Optional[str] = None
def __init__(self,
url: str,
headers: Dict[str, str],
concurrency: int = 30,
client: Optional[HTTPClient] = None):
self.__url = URL(url)
self.__custom_headers = headers
self.__concurrency = concurrency
if not client:
self.client = HTTPClient(self.__url.host,
self.__url.port,
concurrency=self.__concurrency,
ssl=True)
else:
self.client = client
if self.__url.path != LONG_POLLING_V4_PATH:
self.flush = self.__flush
else:
self.open()
self.flush = self.__flush_and_reconnect
self.__wbuf = BytesIO()
def __flush_and_reconnect(self):
if not self.__looped:
self.close()
self.open()
self.__looped = True
else:
self.flush = self.__flush
self.close()
self.flush()
return
data = self.__get_data()
headers = self.__get_headers(data)
self.__response = self.__once_client.request("POST",
self.__url.path,
body=data,
headers=headers)
self.__set_response(self.__response)
def __flush(self):
data = self.__get_data()
headers = self.__get_headers(data)
self.__response = self.client.request("POST",
self.__url.path,
body=data,
headers=headers)
self.__set_response(self.__response)
def __get_data(self) -> bytes:
data = self.__wbuf.getvalue()
self.__wbuf = BytesIO()
return data
def __get_headers(self, data: bytes) -> Dict[str, str]:
headers = {
"Content-Type": "application/x-thrift",
"Content-Length": str(len(data)),
'connection': 'keep-alive',
}
headers.update(self.__custom_headers)
return headers
def __set_response(self, response: HTTPSocketPoolResponse):
self.code = response.status_code
self.message = response.status_message
self.headers = response.headers
def open(self):
self.__once_client = HTTPClient(self.__url.host,
self.__url.port,
concurrency=self.__concurrency,
ssl=True)
def close(self):
self.__once_client.close()
self.__response = None
def read(self, sz: int) -> bytes:
return self.__response.read(sz)
def write(self, buf: bytes):
self.__wbuf.write(buf)
|
1627309
|
from sparknlp_jsl.base import FeaturesAssembler
"""
The FeaturesAssembler is used to collect features from different columns.
It can collect features from single-value columns (anything which can be cast to a float; if the cast fails, the value is set to 0),
array columns, or SparkNLP annotations (if the annotation is an embedding, it takes the embedding; otherwise it tries to cast the result field).
The output of the transformer is a FEATURE_VECTOR annotation (the numeric vector is in the embeddings field).
"""
class SparkNLPFeatureAssembler:
@staticmethod
def get_default_model():
return FeaturesAssembler() \
.setInputCols(["%%%feature_elements%%%"]) \
.setOutputCol("feature_vector") \
|
1627342
|
from django.db import models
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, PageChooserPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from base.forms import TopicCollectionPageForm
from pages.base_page.models import JanisBasePage
from base.models.translated_image import TranslatedImage
from base.models.widgets import countMe, countMeTextArea
from publish_preflight.requirements import FieldPublishRequirement
from snippets.theme.models import Theme
class TopicCollectionPage(JanisBasePage):
janis_url_page_type = "topiccollection"
description = models.TextField(blank=True)
theme = models.ForeignKey(
Theme,
on_delete=models.SET_NULL,
related_name='topic_collection_pages',
null=True, blank=True,
)
image = models.ForeignKey(TranslatedImage, null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
base_form_class = TopicCollectionPageForm
publish_requirements = (
FieldPublishRequirement('theme'),
)
content_panels = [
FieldPanel('title_en', widget=countMe),
FieldPanel('title_es', widget=countMe),
FieldPanel('title_ar'),
FieldPanel('title_vi'),
FieldPanel('slug_en'),
FieldPanel('slug_es'),
FieldPanel('slug_ar'),
FieldPanel('slug_vi'),
FieldPanel('description', widget=countMeTextArea),
FieldPanel('theme'),
ImageChooserPanel('image'),
]
def janis_urls(self):
# should publish at /theme_slug/topic_collection_slug/
if self.theme and self.theme.slug:
return [f'/{self.theme.slug}/{self.slug_en}/']
return []
def janis_instances(self):
"""
Topic Collections do not have contextual nav on Janis
"""
# should publish at /theme_slug/topic_collection_slug/
if self.theme and self.theme.slug:
return [{'url': f'/{self.theme.slug}/{self.slug_en}/', 'parent': None, 'grandparent': None}]
return []
class JanisBasePageWithTopicCollections(JanisBasePage):
def janis_urls(self):
# Add the urls for each topic collection, these pages only
# should publish at /theme_slug/topic_collection_slug/page_slug
urls = []
for base_page_topic_collection in self.topic_collections.all():
for topic_collection_url in base_page_topic_collection.topic_collection.janis_urls():
urls.append(f'{topic_collection_url}{self.slug_en}/')
return urls
def janis_instances(self):
# Add the urls for each topic collection, these pages only
# should publish at /theme_slug/topic_collection_slug/topic_page_slug
instances = []
for base_page_topic_collection in self.topic_collections.all():
for topic_collection_url in base_page_topic_collection.topic_collection.janis_instances():
instances.append({
'url': f'{topic_collection_url["url"]}{self.slug_en}/',
'parent': base_page_topic_collection.topic_collection,
'grandparent': None,
})
return instances
class JanisBasePageTopicCollection(ClusterableModel):
page = ParentalKey(JanisBasePageWithTopicCollections, related_name='topic_collections')
topic_collection = models.ForeignKey('topic_collection_page.TopicCollectionPage',
verbose_name='Select a Topic Collection', related_name='+',
on_delete=models.CASCADE)
panels = [
PageChooserPanel('topic_collection'),
]
|
1627349
|
import csv
from logging import Logger
import os
import sys
from typing import List
import numpy as np
import torch
from tqdm import trange
import pickle
from torch.optim.lr_scheduler import ExponentialLR
from torch.optim import Adam, SGD
import wandb
from .evaluate import evaluate, evaluate_predictions
from .predict import predict
from .train import train
from chemprop.args import TrainArgs
from chemprop.data import StandardScaler, MoleculeDataLoader
from chemprop.data.utils import get_class_sizes, get_data, get_task_names, split_data
from chemprop.models import MoleculeModel
from chemprop.nn_utils import param_count
from chemprop.utils import build_optimizer, build_lr_scheduler, get_loss_func, get_metric_func, load_checkpoint,\
makedirs, save_checkpoint, save_smiles_splits
from chemprop.bayes_utils import neg_log_like, scheduler_const
from .bayes_tr.swag_tr import train_swag
from .bayes_tr.sgld_tr import train_sgld
from .bayes_tr.gp_tr import train_gp
from .bayes_tr.bbp_tr import train_bbp
from .bayes_tr.dun_tr import train_dun
from chemprop.bayes import predict_std_gp, predict_MCdepth
def run_training(args: TrainArgs, logger: Logger = None) -> List[float]:
"""
Trains a model and returns test scores on the model checkpoint with the highest validation score.
:param args: Arguments.
:param logger: Logger.
:return: A list of ensemble scores for each task.
"""
debug = info = print
# Print command line and args
debug('Command line')
debug(f'python {" ".join(sys.argv)}')
debug('Args')
debug(args)
# Save args
args.save(os.path.join(args.save_dir, 'args.json'))
# Get data
debug('Loading data')
args.task_names = args.target_columns or get_task_names(args.data_path)
data = get_data(path=args.data_path, args=args, logger=logger)
args.num_tasks = data.num_tasks()
args.features_size = data.features_size()
debug(f'Number of tasks = {args.num_tasks}')
# Split data
debug(f'Splitting data with seed {args.seed}')
train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes, seed=args.seed, args=args, logger=logger)
if args.features_scaling:
features_scaler = train_data.normalize_features(replace_nan_token=0)
val_data.normalize_features(features_scaler)
test_data.normalize_features(features_scaler)
else:
features_scaler = None
args.train_data_size = len(train_data)
debug(f'Total size = {len(data):,} | '
f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')
# Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
if args.dataset_type == 'regression':
debug('Fitting scaler')
train_smiles, train_targets = train_data.smiles(), train_data.targets()
scaler = StandardScaler().fit(train_targets)
scaled_targets = scaler.transform(train_targets).tolist()
train_data.set_targets(scaled_targets)
else:
scaler = None
# Get loss and metric functions
loss_func = neg_log_like
metric_func = get_metric_func(metric=args.metric)
# Set up test set evaluation
test_smiles, test_targets = test_data.smiles(), test_data.targets()
sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))
# Automatically determine whether to cache
if len(data) <= args.cache_cutoff:
cache = True
num_workers = 0
else:
cache = False
num_workers = args.num_workers
# Create data loaders
train_data_loader = MoleculeDataLoader(
dataset=train_data,
batch_size=args.batch_size,
num_workers=num_workers,
cache=cache,
class_balance=args.class_balance,
shuffle=True,
seed=args.seed
)
val_data_loader = MoleculeDataLoader(
dataset=val_data,
batch_size=args.batch_size,
num_workers=num_workers,
cache=cache
)
test_data_loader = MoleculeDataLoader(
dataset=test_data,
batch_size=args.batch_size,
num_workers=num_workers,
cache=cache
)
###########################################
########## Outer loop over ensemble members
###########################################
for model_idx in range(args.ensemble_start_idx, args.ensemble_start_idx + args.ensemble_size):
# Set pytorch seed for random initial weights
torch.manual_seed(args.pytorch_seeds[model_idx])
######## set up all logging ########
# make save_dir
save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
makedirs(save_dir)
# make results_dir
results_dir = os.path.join(args.results_dir, f'model_{model_idx}')
makedirs(results_dir)
# initialise wandb
os.environ['WANDB_MODE'] = 'dryrun'
wandb.init(
name=args.wandb_name+'_'+str(model_idx),
project=args.wandb_proj,
reinit=True)
print('WANDB directory is:')
print(wandb.run.dir)
####################################
# Load/build model
if args.checkpoint_path is not None:
debug(f'Loading model {model_idx} from {args.checkpoint_path}')
model = load_checkpoint(args.checkpoint_path + f'/model_{model_idx}/model.pt', device=args.device, logger=logger)
else:
debug(f'Building model {model_idx}')
model = MoleculeModel(args)
debug(model)
debug(f'Number of parameters = {param_count(model):,}')
if args.cuda:
debug('Moving model to cuda')
model = model.to(args.device)
# Ensure that model is saved in correct location for evaluation if 0 epochs
save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)
# Optimizer
optimizer = Adam([
{'params': model.encoder.parameters()},
{'params': model.ffn.parameters()},
{'params': model.log_noise, 'weight_decay': 0}
], lr=args.init_lr, weight_decay=args.weight_decay)
# Learning rate scheduler
scheduler = build_lr_scheduler(optimizer, args)
# Run training
best_score = float('inf') if args.minimize_score else -float('inf')
best_epoch, n_iter = 0, 0
for epoch in range(args.epochs):
debug(f'Epoch {epoch}')
n_iter = train(
model=model,
data_loader=train_data_loader,
loss_func=loss_func,
optimizer=optimizer,
scheduler=scheduler,
args=args,
n_iter=n_iter,
logger=logger
)
val_scores = evaluate(
model=model,
data_loader=val_data_loader,
args=args,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
scaler=scaler,
logger=logger
)
# Average validation score
avg_val_score = np.nanmean(val_scores)
debug(f'Validation {args.metric} = {avg_val_score:.6f}')
wandb.log({"Validation MAE": avg_val_score})
# Save model checkpoint if improved validation score
if args.minimize_score and avg_val_score < best_score or \
not args.minimize_score and avg_val_score > best_score:
best_score, best_epoch = avg_val_score, epoch
save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)
if epoch == args.noam_epochs - 1:
optimizer = Adam([
{'params': model.encoder.parameters()},
{'params': model.ffn.parameters()},
{'params': model.log_noise, 'weight_decay': 0}
], lr=args.final_lr, weight_decay=args.weight_decay)
scheduler = scheduler_const([args.final_lr])
# load model with best validation score
info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
model = load_checkpoint(os.path.join(save_dir, 'model.pt'), device=args.device, logger=logger)
# SWAG training loop, returns swag_model
if args.swag:
model = train_swag(
model,
train_data,
val_data,
num_workers,
cache,
loss_func,
metric_func,
scaler,
features_scaler,
args,
save_dir)
# SGLD loop, which saves nets
if args.sgld:
model = train_sgld(
model,
train_data,
val_data,
num_workers,
cache,
loss_func,
metric_func,
scaler,
features_scaler,
args,
save_dir)
# GP loop
if args.gp:
model, likelihood = train_gp(
model,
train_data,
val_data,
num_workers,
cache,
metric_func,
scaler,
features_scaler,
args,
save_dir)
# BBP
if args.bbp:
model = train_bbp(
model,
train_data,
val_data,
num_workers,
cache,
loss_func,
metric_func,
scaler,
features_scaler,
args,
save_dir)
# DUN
if args.dun:
model = train_dun(
model,
train_data,
val_data,
num_workers,
cache,
loss_func,
metric_func,
scaler,
features_scaler,
args,
save_dir)
##################################
########## Inner loop over samples
##################################
for sample_idx in range(args.samples):
# draw model from SWAG posterior
if args.swag:
model.sample(scale=1.0, cov=args.cov_mat, block=args.block)
# draw model from collected SGLD models
if args.sgld:
model = load_checkpoint(os.path.join(save_dir, f'model_{sample_idx}.pt'), device=args.device, logger=logger)
# make predictions
test_preds = predict(
model=model,
data_loader=test_data_loader,
args=args,
scaler=scaler,
test_data=True,
bbp_sample=True)
#######################################################################
#######################################################################
##### SAVING STUFF DOWN
if args.gp:
# get test_preds_std (scaled back to original data)
test_preds_std = predict_std_gp(
model=model,
data_loader=test_data_loader,
args=args,
scaler=scaler,
likelihood = likelihood)
# 1 - MEANS
np.savez(os.path.join(results_dir, f'preds_{sample_idx}'), np.array(test_preds))
# 2 - STD, combined aleatoric and epistemic (we save down the stds, always)
np.savez(os.path.join(results_dir, f'predsSTDEV_{sample_idx}'), np.array(test_preds_std))
else:
# save test_preds and aleatoric uncertainties
if args.dun:
log_cat = model.log_cat.detach().cpu().numpy()
cat = np.exp(log_cat) / np.sum(np.exp(log_cat))
np.savez(os.path.join(results_dir, f'cat_{sample_idx}'), cat)
# samples from categorical dist and saves a depth MC sample
depth_sample = np.random.multinomial(1, cat).nonzero()[0][0]
test_preds_MCdepth = predict_MCdepth(
model=model,
data_loader=test_data_loader,
args=args,
scaler=scaler,
d=depth_sample
)
np.savez(os.path.join(results_dir, f'predsMCDEPTH_{sample_idx}'), np.array(test_preds_MCdepth))
if args.swag:
log_noise = model.base.log_noise
else:
log_noise = model.log_noise
noise = np.exp(log_noise.detach().cpu().numpy()) * np.array(scaler.stds)
np.savez(os.path.join(results_dir, f'preds_{sample_idx}'), np.array(test_preds))
np.savez(os.path.join(results_dir, f'noise_{sample_idx}'), noise)
#######################################################################
#######################################################################
# add predictions to sum_test_preds
if len(test_preds) != 0:
sum_test_preds += np.array(test_preds)
# evaluate predictions using metric function
test_scores = evaluate_predictions(
preds=test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
# compute average test score
avg_test_score = np.nanmean(test_scores)
info(f'Model {model_idx}, sample {sample_idx} test {args.metric} = {avg_test_score:.6f}')
#################################
########## Bayesian Model Average
#################################
# note: this is an average over Bayesian samples AND components in an ensemble
# compute number of prediction iterations
pred_iterations = args.ensemble_size * args.samples
# average predictions across iterations
avg_test_preds = (sum_test_preds / pred_iterations).tolist()
# evaluate
BMA_scores = evaluate_predictions(
preds=avg_test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
# average scores across tasks
avg_BMA_test_score = np.nanmean(BMA_scores)
info(f'BMA test {args.metric} = {avg_BMA_test_score:.6f}')
return BMA_scores
|
1627391
|
from pubnub.models.consumer.v3.pn_resource import PNResource
class Channel(PNResource):
def __init__(self, resource_name=None, resource_pattern=None):
super(Channel, self).__init__(resource_name, resource_pattern)
@staticmethod
def id(channel_id):
channel = Channel(resource_name=channel_id)
return channel
@staticmethod
def pattern(channel_pattern):
channel = Channel(resource_pattern=channel_pattern)
return channel
def read(self):
self._read = True
return self
def manage(self):
self._manage = True
return self
def write(self):
self._write = True
return self
def delete(self):
self._delete = True
return self
def get(self):
self._get = True
return self
def update(self):
self._update = True
return self
def join(self):
self._join = True
return self
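# Usage sketch (illustrative; channel names are hypothetical): the chainable
# setters accumulate permission flags for a channel resource, e.g. when
# composing a PubNub access-manager grant:
#
#   ch = Channel.id("chats.room-1").read().write()
#   wildcard = Channel.pattern("chats.*").read()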
|
1627444
|
import sys
import time
import weaver.client as client
import simple_client
# creating graph
nodes = [None] * 5
coord_id = 0
c = client.Client(client._CLIENT_ID+1, coord_id)
sc = simple_client.simple_client(c)
tx_id = c.begin_tx()
center = c.create_node(tx_id)
nodes[0] = c.create_node(tx_id)
nodes[1] = c.create_node(tx_id)
nodes[2] = c.create_node(tx_id)
c.create_edge(tx_id, center, nodes[0])
c.create_edge(tx_id, center, nodes[1])
c.create_edge(tx_id, center, nodes[2])
nodes[3] = c.create_node(tx_id)
c.create_edge(tx_id, nodes[2], nodes[3])
nodes[4] = c.create_node(tx_id)
c.create_edge(tx_id, nodes[0], nodes[4])
c.set_node_property(tx_id, center, 'name', 'BAD1')
c.set_node_property(tx_id, nodes[0], 'name', 'BAD2')
c.set_node_property(tx_id, nodes[2], 'name', 'Scarlet')
c.set_node_property(tx_id, nodes[3], 'name', 'Greg')
c.set_node_property(tx_id, nodes[4], 'name', 'Ayush')
c.create_edge(tx_id, nodes[0], nodes[1])
c.create_edge(tx_id, nodes[1], nodes[2])
c.end_tx(tx_id)
print nodes
print sc.two_neighborhood(center, "name", caching=True)
print sc.two_neighborhood(center, "nope", caching=True)
tx_id = c.begin_tx()
new_2_hop = c.create_node(tx_id)
c.create_edge(tx_id, nodes[1], new_2_hop)
c.set_node_property(tx_id, new_2_hop, 'name', 'Sam')
assert(c.end_tx(tx_id))
print "adding two hop neighbor"
print sc.two_neighborhood(center, "name", caching=True)
print "adding 1 and two hop neighbors"
tx_id = c.begin_tx()
new_1_hop = c.create_node(tx_id)
to_del = c.create_edge(tx_id, center, new_1_hop)
c.set_node_property(tx_id, new_1_hop, 'name', 'Robert')
new_2_hops1 = c.create_node(tx_id)
new_2_hops2 = c.create_node(tx_id)
c.create_edge(tx_id, new_1_hop, new_2_hops1)
c.create_edge(tx_id, new_1_hop, new_2_hops2)
c.set_node_property(tx_id, new_2_hops1, 'name', 'Jianeng')
c.set_node_property(tx_id, new_2_hops2, 'name', 'Sarah')
assert(c.end_tx(tx_id))
print sc.two_neighborhood(center, "name", caching=True)
print "deleting node"
tx_id = c.begin_tx()
c.delete_edge(tx_id, to_del, center)
c.delete_node(tx_id, new_1_hop)
assert(c.end_tx(tx_id))
print sc.two_neighborhood(center, "name", caching=True)
print "done!"
|
1627455
|
import requests
def create_invitation(context, tenant, alias, invitation_type):
data = {"alias": alias, "invitation_type": invitation_type}
response = requests.post(
context.config.userdata.get("traction_host")
+ "/tenant/v1/contacts/create-invitation",
json=data,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def list_contacts(context, tenant, params):
response = requests.get(
context.config.userdata.get("traction_host") + "/tenant/v1/contacts",
params=params,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def get_contact(context, tenant, contact_id, params: dict | None = {}):
response = requests.get(
context.config.userdata.get("traction_host")
+ f"/tenant/v1/contacts/{contact_id}",
params=params,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def update_contact(context, tenant, contact_id, payload):
response = requests.put(
context.config.userdata.get("traction_host")
+ f"/tenant/v1/contacts/{contact_id}",
json=payload,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def delete_contact(context, tenant, contact_id):
response = requests.delete(
context.config.userdata.get("traction_host")
+ f"/tenant/v1/contacts/{contact_id}",
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def create_schema_template(context, tenant, payload):
response = requests.post(
context.config.userdata.get("traction_host")
+ "/tenant/v1/governance/schema_templates/",
json=payload,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def list_schema_templates(context, tenant, params):
response = requests.get(
context.config.userdata.get("traction_host")
+ "/tenant/v1/governance/schema_templates",
params=params,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def get_schema_template(context, tenant, item_id, params: dict | None = {}):
response = requests.get(
context.config.userdata.get("traction_host")
+ f"/tenant/v1/governance/schema_templates/{item_id}",
params=params,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def update_schema_template(context, tenant, item_id, payload):
response = requests.put(
context.config.userdata.get("traction_host")
+ f"/tenant/v1/governance/schema_templates/{item_id}",
json=payload,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def delete_schema_template(context, tenant, item_id):
response = requests.delete(
context.config.userdata.get("traction_host")
+ f"/tenant/v1/governance/schema_templates/{item_id}",
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def create_credential_template(context, tenant, payload):
response = requests.post(
context.config.userdata.get("traction_host")
+ "/tenant/v1/governance/credential_templates/",
json=payload,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def list_credential_templates(context, tenant, params):
response = requests.get(
context.config.userdata.get("traction_host")
+ "/tenant/v1/governance/credential_templates",
params=params,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def get_credential_template(context, tenant, item_id, params: dict | None = {}):
response = requests.get(
context.config.userdata.get("traction_host")
+ f"/tenant/v1/governance/credential_templates/{item_id}",
params=params,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def update_credential_template(context, tenant, item_id, payload):
response = requests.put(
context.config.userdata.get("traction_host")
+ f"/tenant/v1/governance/credential_templates/{item_id}",
json=payload,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def delete_credential_template(context, tenant, item_id):
response = requests.delete(
context.config.userdata.get("traction_host")
+ f"/tenant/v1/governance/credential_templates/{item_id}",
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def send_message(context, tenant, contact_id, content):
data = {"contact_id": contact_id, "content": content}
response = requests.post(
context.config.userdata.get("traction_host")
+ "/tenant/v1/messages/send-message",
json=data,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def list_messages(context, tenant, params):
response = requests.get(
context.config.userdata.get("traction_host") + "/tenant/v1/messages",
params=params,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def get_message(context, tenant, item_id, params: dict | None = {}):
response = requests.get(
context.config.userdata.get("traction_host") + f"/tenant/v1/messages/{item_id}",
params=params,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def update_message(context, tenant, item_id, payload):
response = requests.put(
context.config.userdata.get("traction_host") + f"/tenant/v1/messages/{item_id}",
json=payload,
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def delete_message(context, tenant, item_id):
response = requests.delete(
context.config.userdata.get("traction_host") + f"/tenant/v1/messages/{item_id}",
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def get_tenant_self(context, tenant):
response = requests.get(
context.config.userdata.get("traction_host") + "/tenant/v1/admin/self",
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
def tenant_make_issuer(context, tenant: str):
response = requests.post(
context.config.userdata.get("traction_host") + "/tenant/v1/admin/make-issuer",
headers=context.config.userdata[tenant]["auth_headers"],
)
return response
|
1627462
|
from pygsti.tools import pdftools
from ..util import BaseCase
class PDFToolsTester(BaseCase):
def test_pdf_tools(self):
p = {'a': 0., 'b': 1.0}
q = {'a': 0.5, 'b': 0.5}
self.assertAlmostEqual(pdftools.tvd(p, q), .5)
self.assertAlmostEqual(pdftools.classical_fidelity(p, q), .5)
p = {'b': 1.0}
q = {'a': 0.5, 'b': 0.5}
self.assertAlmostEqual(pdftools.tvd(p, q), .5)
self.assertAlmostEqual(pdftools.classical_fidelity(p, q), .5)
p = {'b': 1.0}
q = {'a': 1.0}
self.assertAlmostEqual(pdftools.tvd(p, q), 1.)
self.assertAlmostEqual(pdftools.classical_fidelity(p, q), 0.)
p = {'a': 0., 'b': 1.0}
q = {'a': 1.0, 'b': .0}
self.assertAlmostEqual(pdftools.tvd(p, q), 1.)
self.assertAlmostEqual(pdftools.classical_fidelity(p, q), 0.)
p = {'a': 0., 'b': 1.0}
q = {'a': 0., 'b': 1.0}
self.assertAlmostEqual(pdftools.tvd(p, q), 0.)
self.assertAlmostEqual(pdftools.classical_fidelity(p, q), 1.)
|
1627518
|
import os
import glob
import shutil
import logging
import traceback
import sys
import re
import subprocess
import yaml
from pentagon.component import ComponentBase
from pentagon.helpers import render_template
from pentagon.defaults import AWSPentagonDefaults as PentagonDefaults
class Cluster(ComponentBase):
_path = os.path.dirname(__file__)
def add(self, destination):
for key in PentagonDefaults.kubernetes:
if not self._data.get(key):
self._data[key] = PentagonDefaults.kubernetes[key]
if not self._data.get('network_cidr_base'):
self._data['network_cidr_base'] = PentagonDefaults.vpc['vpc_cidr_base']
for key in ['authorization', 'networking']:
self._data[key] = yaml.dump(self._data[key])
return super(Cluster, self).add(destination)
def get(self, destination):
self._cluster_name = self._data.get('name', os.environ.get('CLUSTER_NAME'))
self._bucket = self._data.get('kops_state_store', os.environ.get('KOPS_STATE_STORE'))
self._destination = destination
if self._bucket is None:
logging.error("kops_state_store required.")
sys.exit(1)
if self._cluster_name is None:
logging.error("name is required.")
sys.exit(1)
os.mkdir(self._cluster_name)
os.chdir(self._cluster_name)
self._get_cluster_yaml()
for ig in self._cluster_instance_groups:
self._get_instance_group_yaml(ig)
self._get_cluster_admin_secret()
@property
def _cluster_instance_groups(self):
# get igs yaml
logging.debug("Getting instance groups.")
args = ['kops',
'get',
'ig',
'--name={}'.format(self._cluster_name),
'--state=s3://{}'.format(self._bucket)]
return [ig.split("\t")[0] for ig in subprocess.check_output(args).split("\n")][1:-1]
def _get_instance_group_yaml(self, ig):
args = ['kops',
'get',
'ig',
ig,
'--name={}'.format(self._cluster_name),
'--state=s3://{}'.format(self._bucket),
'-oyaml']
ig_yaml = subprocess.check_output(args)
file_mode = 'w'
if "master" in ig:
ig_file_name = "master.yml"
file_mode = 'a'
else:
ig_file_name = "{}.yml".format(ig)
with open(ig_file_name, file_mode) as ig_file:
ig_file.write("---\n")
ig_file.write("{}\n".format(ig_yaml))
ig_file.close()
def _get_cluster_admin_secret(self):
# get secret sorta
logging.debug("Getting ssh key secret. This will require transformation before a new cluster can be created")
with open('secret.sh', 'w') as sf:
args = ['kops',
'get',
'secret',
'admin',
'--name={}'.format(self._cluster_name),
'--state=s3://{}'.format(self._bucket)]
subprocess.Popen(args, stdout=sf)
def _get_cluster_yaml(self):
# get cluster yaml
logging.debug("Getting cluster.")
with open('cluster.yml', 'w') as cf:
args = ['kops',
'get',
'cluster',
'--name={}'.format(self._cluster_name),
'--state=s3://{}'.format(self._bucket),
'-oyaml']
p = subprocess.Popen(args, stdout=cf, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
logging.error("Error getting cluster: {}".format(stderr))
sys.exit(1)
|
1627530
|
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
from CapsuleNet import CapsuleNet, CapsuleLoss
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser(description='CapsuleNet PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=64, metavar='N',
help='input batch size for testing (default: 64)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
return args
def train(args, num_classes, model, optimizer, epoch_index, train_loader, capsule_loss):
model.train()
# num_batch = int(60000 / args.batch_size)
# for batch_idx, (X, y) in tqdm(enumerate(train_loader), total=num_batch, ncols=70, leave=False, unit='b'):
for batch_idx, (X, y) in enumerate(train_loader):
y_onehot = torch.zeros(y.size()[0],num_classes).scatter_(1, y.unsqueeze(-1), 1)
if args.cuda:
X, y = X.cuda(), y.cuda()
X, y = Variable(X), Variable(y)
optimizer.zero_grad()
prob, X_l2norm, reconstructions = model(X, y, with_label=True)
loss = capsule_loss(num_classes, X, y_onehot, X_l2norm, reconstructions)
loss.backward()
optimizer.step()
# break
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch_index, batch_idx * len(X), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
def test(args, num_classes, model, test_loader,capsule_loss):
model.eval()
test_loss = 0
num_correct = 0
# for X, y in test_loader:
for batch_idx, (X, y) in enumerate(test_loader):
y_onehot = torch.zeros(y.size()[0],num_classes).scatter_(1, y.unsqueeze(-1), 1)
if args.cuda:
X, y = X.cuda(), y.cuda()
X, y = Variable(X, volatile=True), Variable(y)
prob,X_l2norm, reconstructions = model(X, y, with_label=True)
loss = capsule_loss(num_classes, X, y_onehot, X_l2norm, reconstructions)
test_loss += loss
pred_y = prob.data.max(1, keepdim=True)[1]
num_correct += pred_y.eq(y.data.view_as(pred_y)).cpu().sum()
if batch_idx % args.log_interval == 0:
# import pdb; pdb.set_trace()
print('Test Index:[{}/{}]'.format(batch_idx * len(X), len(test_loader.dataset)))
test_loss /= len(test_loader.dataset)
# import pdb ; pdb.set_trace()
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(test_loss.data[0], num_correct, len(test_loader.dataset), 100. * num_correct / len(test_loader.dataset)))
def main():
args = parse_args()
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
##############################################################
## Load Data from torchvision ##
##############################################################
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,),(0.3081,))
])),
batch_size=args.batch_size,
shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,),(0.3081,))
])),
batch_size=args.test_batch_size,
shuffle=True, **kwargs)
##############################################################
## Load model and set optimizer ##
##############################################################
capsule_model = CapsuleNet()
if args.cuda:
capsule_model.cuda()
# optimizer = optim.SGD(capsule_model.parameters(), lr=args.lr, momentum=args.momentum)
optimizer = optim.Adam(capsule_model.parameters())
capsule_loss = CapsuleLoss()
num_classes = 10
for epoch_index in range(1, args.epochs + 1):
train(args, num_classes, capsule_model, optimizer, epoch_index, train_loader, capsule_loss)
test(args, num_classes, capsule_model, test_loader, capsule_loss)
if __name__ == '__main__':
main()
|
1627554
|
from horch.models.modules import Norm, Conv2d
from horch.models.attention import SEModule
from horch.models.cifar.pyramidnet import Bottleneck as PyrUnit
from torch import nn as nn
from pytorchcv.models.shufflenetv2b import ShuffleUnit
def shuffle_block(in_channels, out_channels):
return ShuffleUnit(in_channels, out_channels, downsample=True, use_se=True, use_residual=False, shuffle_group_first=True)
def pyramid_block(in_channels, out_channels):
assert in_channels <= out_channels
return PyrUnit(in_channels, out_channels // PyrUnit.expansion, stride=2)
def mb_conv_block(in_channels, out_channels, expand_ratio=4, kernel_size=3):
if expand_ratio == 1:
return MBConv(in_channels, in_channels, out_channels, kernel_size=kernel_size, stride=2)
else:
return MBConv(in_channels, out_channels * expand_ratio, out_channels, kernel_size=kernel_size, stride=2)
class MBConv(nn.Sequential):
def __init__(self, in_channels, channels, out_channels, kernel_size, stride=1, se_ratio=1 / 16):
super().__init__()
self.bn = Norm('default', in_channels)
if in_channels != channels:
self.expand = Conv2d(in_channels, channels, kernel_size=1,
norm='default', act='default')
self.dwconv = Conv2d(channels, channels, kernel_size, stride=stride, groups=channels,
norm='default', act='default')
if se_ratio:
assert 0 < se_ratio < 1
self.se = SEModule(channels, reduction=int(1 / se_ratio))
if out_channels is not None:
self.project = Conv2d(channels, out_channels, kernel_size=1,
norm='default')
self.use_res_connect = stride == 1 and in_channels == out_channels
def forward(self, x):
identity = x
x = super().forward(x)
if self.use_res_connect:
x += identity
return x
|
1627561
|
import logging
from django.views.generic import TemplateView, FormView, View
from django.contrib import messages
from django.core.mail import EmailMessage
from django.conf import settings
import mailchimp
from braces.views import (
AjaxResponseMixin,
JSONResponseMixin,
LoginRequiredMixin,
)
from glucoses.models import Glucose
from .forms import ContactForm
logger = logging.getLogger(__name__)
class HomePageView(TemplateView):
template_name = 'home.html'
def get_context_data(self, **kwargs):
context = super(HomePageView, self).get_context_data(**kwargs)
context['glucose_count'] = Glucose.objects.count()
return context
class HelpPageView(LoginRequiredMixin, FormView):
success_url = '.'
form_class = ContactForm
template_name = 'core/help.html'
def get_initial(self):
return {
'email': self.request.user.email
}
def form_valid(self, form):
success_message = '''Email sent! We'll try to get back to you as
soon as possible.'''
messages.add_message(self.request, messages.SUCCESS, success_message)
return super(HelpPageView, self).form_valid(form)
def form_invalid(self, form):
failure_message = 'Email not sent. Please try again.'
messages.add_message(self.request, messages.WARNING, failure_message)
return super(HelpPageView, self).form_invalid(form)
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
support_email = settings.CONTACTS['support_email']
message = 'Sent By: %s (%s)\n\n%s' % (
form.cleaned_data['email'],
self.request.user.username,
form.cleaned_data['message'])
email = EmailMessage(
from_email=support_email,
subject='[Help] %s ' % form.cleaned_data['subject'],
body=message,
to=[support_email])
email.send()
return self.form_valid(form)
else:
return self.form_invalid(form)
class MailingListSignupAjaxView(JSONResponseMixin, AjaxResponseMixin, View):
"""
Sign up an email address to a MailChimp list.
"""
def post_ajax(self, request, *args, **kwargs):
email = request.POST.get('email').strip().lower()
mailchimp_list_id = settings.MAILCHIMP_LIST_ID
response_dict = {
'message': '{0} successfully subscribed to {1}!'.format(
email, mailchimp_list_id),
}
mc = mailchimp.Mailchimp(settings.MAILCHIMP_API_KEY)
try:
mc.lists.subscribe(
id=mailchimp_list_id,
email={'email': email},
update_existing=True,
double_optin=True,
)
logger.info('%s successfully subscribed to %s', email,
mailchimp_list_id)
except mailchimp.Error, e:
logger.error('A MailChimp error occurred: %s', e)
response_dict['message'] = 'Sorry, an error occurred.'
return self.render_json_response(response_dict, status=500)
return self.render_json_response(response_dict)
|
1627589
|
from seleniumwire.thirdparty.mitmproxy.addons import core
from seleniumwire.thirdparty.mitmproxy.addons import streambodies
from seleniumwire.thirdparty.mitmproxy.addons import upstream_auth
def default_addons():
return [
core.Core(),
streambodies.StreamBodies(),
upstream_auth.UpstreamAuth(),
]
|
1627635
|
import abc
class CoverageFile(object):
"""
Templated class for Lighthouse-compatible code coverage file reader.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self, filepath=None):
self.filepath = filepath
self.modules = {}
self._parse()
#--------------------------------------------------------------------------
# Parsing Routines
#--------------------------------------------------------------------------
@abc.abstractmethod
def _parse(self):
"""
Load and parse coverage data from the file defined by self.filepath
Within this function, a custom CoverageFile is expected to attempt to
parse the coverage file from disk. If the coverage file does not appear
to match the format expected by this parser -- that is okay.
Should this parser crash and burn, the CoverageReader will simply move
on to the next available parser and discard this attempt.
This function should *only* parse & categorize the coverage data that
it loads from disk. If this function returns without error, the
CoverageReader will attempt to call one of the get() functions later
to retrieve the data you have loaded.
The best coverage file formats will contain some sort of mapping
for the coverage data that ties it to a module or binary that was in
the instrumented process space.
If this mapping is known, then this function should strive to store
the coverage data in the self.modules dictionary, where
self.modules[module_name] = [ coverage_addresses ]
"""
raise NotImplementedError("Coverage parser not implemented")
#--------------------------------------------------------------------------
# Public
#--------------------------------------------------------------------------
#
# if you are writing a parser for a custom coverage file format, your
# parser is *REQUIRED* to implement one of the following routines.
#
# the CoverageReader will attempt to retrieve parsed data from this class
# using one of the functions below.
#
def get_addresses(self, module_name=None):
"""
Return coverage data for the named module as absolute addresses.
If no name is given / available via self.modules, the trace is assumed
to be an ABSOLUTE ADDRESS TRACE.
These are arguably the least flexible kind of traces available, but are
still provided as an option. This function should return a list of
integers representing absolute coverage addresses that match the open
disassembler database...
coverage_addresses = [address, address1, address2, ...]
"""
raise NotImplementedError("Absolute addresses not supported by this log format")
def get_offsets(self, module_name):
"""
Return coverage data for the named module as relative offsets.
This function should return a list of integers representing the
relative offset of an executed instruction OR basic block from the
base of the requested module (module_name).
It is *okay* to return an instruction trace, OR a basic block trace
from this function. Lighthouse will automatically detect basic block
based traces and 'explode' them into instruction traces.
coverage_data = [offset, offset2, offset3, ...]
"""
raise NotImplementedError("Relative addresses not supported by this log format")
def get_offset_blocks(self, module_name):
"""
Return coverage data for the named module in block form.
This function should return a list of tuples representing the coverage
for the requested module (module_name). The tuples must be in the form
of (offset, size).
offset - a relative offset from the module_name base address
size - the size of the instruction, block, or sequence executed
eg, if a basic block of 24 bytes in length at kernel32.dll+0x4182 was
executed, its tuple would be (0x4182, 24).
The complete list of coverage data returned by this function should be in
the following form:
coverage_data = [(offset, size), (offset1, size1), ...]
"""
raise NotImplementedError("Block form not supported by this log format")
|
1627636
|
from docopt import docopt, DocoptExit
from chemaboxwriters.ontospecies import write_abox
import json
doc = """aboxwriter
Usage:
ospecies <fileOrDir> [--inp-file-type=<type>]
[--qc-log-ext=<ext>]
[--out-dir=<dir>]
[--out-base-name=<name>]
[--handlers-args=<handlargs>]
Options:
--inp-file-type=<type> Types of the allowed input files
to the ospecies abox writer:
- quantum calculation log [default: qc_log]
- quantum calculation json [qc_json]
- ontospecies meta json [os_json]
- ontospecies meta csv [csv]
--qc-log-ext=<ext> Extensions of the quantum
calculation log files, defaults
to ".log, .g09" if not specified
--out-dir=<dir> Output directory to write the
abox files to. If not provided
defaults to the directory of the
input file
--out-base-name=<name> Base name of the produced output
files. If not provided, defaults
to the input file base name.
--handlers-args=<handlargs> Any supported stage handlers' [default: {}]
arguments. The arguments are passed
as a json formatted string, where
the first level keys are handlers
names followed by their arguments
names and values, e.g:
{\\"QC_JSON_TO_OS_JSON\\": {\\"random_id\\": \\"123\\"}}
with all double quotes escaped.
"""
def start():
try:
args = docopt(doc)
except DocoptExit:
raise DocoptExit('Error: ospecies called with wrong arguments.')
try:
handlerFuncKwargs = json.loads(args['--handlers-args'])
except json.decoder.JSONDecodeError:
print('Error: Wrong --handlers-args input.')
return
write_abox(
fileOrDir=args['<fileOrDir>'],
inpFileType=args['--inp-file-type'],
outDir=args['--out-dir'],
qcLogExt=args['--qc-log-ext'],
outBaseName=args['--out-base-name'],
handlerFuncKwargs=handlerFuncKwargs
)
if __name__ == '__main__':
start()
|
1627649
|
from __future__ import absolute_import, print_function
try:
# PY2
# noinspection PyUnresolvedReferences
from ConfigParser import ConfigParser
except ImportError:
# PY3
# noinspection PyUnresolvedReferences
from configparser import ConfigParser
from itertools import chain
from warnings import warn
import functools
import click
import yaml
import six
import ast
import sys
import os
__author__ = 'bergundy'
notify = functools.partial(print, file=sys.stderr)
DEFAULT_ENV_VAR = 'CONF'
def load_from_env(module, env_var=DEFAULT_ENV_VAR, watch=False):
return Parser(module, _parse_env(env_var), env_var=env_var, watch=watch)
def wrap(fn=None, module=None, sections=(), env_var=DEFAULT_ENV_VAR, watch=False):
if fn is None:
return functools.partial(wrap, module=module, sections=sections, env_var=env_var, watch=watch)
@functools.wraps(fn)
def wrapper(conf, **kwargs):
kwargs_to_forward = {k: v for k, v in six.iteritems(kwargs) if not k.startswith('conf_')}
overrides = (_parse_arg(k, v) for k, v in six.iteritems(kwargs) if k.startswith('conf_') and v)
parser = Parser(module, _parse_env(env_var) + conf, overrides, env_var, watch)
if watch:
kwargs_to_forward['watcher'] = parser.watcher
return fn(**kwargs_to_forward)
wrapper = click.option('-c', '--conf', multiple=True, type=click.Path(exists=True))(wrapper)
for section in sections:
wrapper = click.option('--conf-{}'.format(section), multiple=True, type=str)(wrapper)
return wrapper
def flatten_dicts(dicts):
base = {}
for d in dicts:
base.update(d)
return base
def _parse_files(files):
for f in files:
loader = _get_loader(f)
if loader is None:
continue
for section, items in iter(loader):
yield f, section, items
def loadYAML(f):
with open(f) as f:
data = yaml.load(f)
if isinstance(data, dict):
for section, config in six.iteritems(data):
if config and isinstance(config, dict):
yield section, six.iteritems(config)
def loadINI(f):
parser = ConfigParser()
parser.optionxform = str
parser.read(f)
for section in parser.sections():
yield section, ((k, parse_value(v)) for k, v in parser.items(section, raw=True) if v != '')
def parse_value(value):
# noinspection PyBroadException
try:
return ast.literal_eval(value)
except Exception:
return ast.literal_eval('"%s"' % value)
def _get_loader(f):
if f.endswith('.yaml') or f.endswith('.yml'):
return loadYAML(f)
elif f.endswith('.conf') or f.endswith('.ini'):
return loadINI(f)
else:
return None
def _extract_files_from_paths(paths):
for path in paths:
if os.path.isdir(path):
for f in sorted(os.listdir(path)):
yield os.path.join(path, f)
else:
yield path
def _parse_env(env_var):
return tuple(filter(None, os.environ.get(env_var, '').split(':')))
def _parse_arg(k, v):
return '<ARG>', k[5:], six.iteritems(flatten_dicts(map(yaml.load, v)))
class Parser(object):
def __init__(self, module, paths, overrides=None, env_var=DEFAULT_ENV_VAR, watch=False):
self.module = module
self.env_var = env_var
self.watch = watch
self.config_paths = paths
self.overrides = overrides or []
self.reload()
if self.watch:
from .inotify import Watcher
self.watcher = Watcher(self)
self.watcher.add_listener(self.watcher.ALL, self._on_key_change)
def reload(self):
for f, sect, items in chain(_parse_files(_extract_files_from_paths(self.config_paths)), self.overrides):
self._configure_section(f, sect, items)
def _on_key_change(self, section, key, value):
self._configure_section('<WATCHER>', section, [(key, value)])
def _handle_missing_section(self, section, f):
warn('No section "{}" in module "{}" originating in file: "{}"'.format(section, self.module, f))
def _handle_missing_key(self, section, key, f):
warn('No key "{}" in section "{}" in module "{}" originating in file: "{}"'
.format(key, section, self.module, f))
def _configure_section(self, f, section, items):
target = getattr(self.module, section, None)
if target is None:
# warn about the unknown section and skip it
self._handle_missing_section(section, f)
return
notify('Configuring section "{}" from "{}"'.format(section, f))
for k, v in items:
try:
setattr(target, k, v)
except AttributeError:
self._handle_missing_key(section, k, f)
|
1627679
|
def pareto_frontier(Xs, Ys, maxX = True, maxY = True):
myList = sorted([[Xs[i], Ys[i]] for i in range(len(Xs))], reverse=maxX)
p_front = [myList[0]]
for pair in myList[1:]:
if maxY:
if pair[1] >= p_front[-1][1]:
p_front.append(pair)
else:
if pair[1] <= p_front[-1][1]:
p_front.append(pair)
p_frontX = [pair[0] for pair in p_front]
p_frontY = [pair[1] for pair in p_front]
return p_frontX, p_frontY
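# Illustrative usage sketch (not part of the original module): with maxX=True
# and maxY=True the frontier keeps the points not dominated in either
# coordinate, scanning from the largest X value downward.
if __name__ == "__main__":
    xs = [1, 2, 3, 4, 5]
    ys = [5, 3, 4, 1, 2]
    # expected output: ([5, 3, 1], [2, 4, 5]) -- the points (5, 2), (3, 4), (1, 5)
    print(pareto_frontier(xs, ys, maxX=True, maxY=True))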
|
1627737
|
import pytest
import mock
import gym_quadrotor
import sys
# try to load pyglet, but in case we can't for whatever reason, just mock it at the module level
try:
import pyglet.gl
except:
sys.modules["pyglet.gl"] = mock.Mock()
from gym_quadrotor.envs.rendering import *
@pytest.fixture()
def renderer():
renderer = Renderer()
with mock.patch('gym_quadrotor.envs.rendering.rendering') as rendering_mock:
def new_viewer(*args):
return mock.Mock()
rendering_mock.Viewer = mock.Mock(side_effect=new_viewer)
yield renderer
def test_renderer_center(renderer):
assert renderer.center is None
# first center is directly set
renderer.set_center(1.0)
assert renderer.center == pytest.approx(1)
# second update is smoothed
renderer.set_center(2.0)
assert renderer.center == pytest.approx(1.1)
# after a reset, we again set directly
renderer.set_center(None)
assert renderer.center is None
renderer.set_center(2.0)
assert renderer.center == pytest.approx(2)
# check that viewer is correctly recentered
renderer.setup()
renderer.set_center(2.0)
renderer.viewer.set_bounds.assert_called_once_with(-5, 9, -1, 13)
def test_renderer_setup(renderer):
renderer.setup()
gym_quadrotor.envs.rendering.rendering.Viewer.assert_called_once()
# second call to setup does nothing
renderer.setup()
gym_quadrotor.envs.rendering.rendering.Viewer.assert_called_once()
def test_renderer_close(renderer):
# this is a no-op
renderer.close()
renderer.setup()
# we need to cache the viewer here, because close will reset this variable to None
viewer = renderer.viewer # type: mock.Mock
renderer.close()
viewer.close.assert_called_once()
assert renderer.viewer is None
# closing via render(close=True) call should have the same effect
renderer.setup()
viewer = renderer.viewer # type: mock.Mock
renderer.render(close=True)
viewer.close.assert_called_once()
def test_renderer_render(renderer):
obj1 = mock.Mock()
obj2 = mock.Mock()
renderer.add_object(obj1)
renderer.add_object(obj2)
renderer.render()
obj1.draw.assert_called_once()
obj2.draw.assert_called_once()
renderer.viewer.render.assert_called_once()
def test_renderer_line_drawing(renderer):
renderer.setup()
renderer.draw_line_2d(1, 2)
renderer.viewer.draw_line.assert_called_once_with(1, 2)
renderer.draw_line_3d((1, 2, 3), (4, 5, 6))
renderer.viewer.draw_line.assert_called_with((1, 3), (4, 6))
|
1627762
|
from starry.compat import theano
from starry.compat import tt
import numpy as np
import starry
import matplotlib.pyplot as plt
import pytest
@pytest.fixture
def model():
class Model:
def __init__(self):
self.map = starry.Map(ydeg=1, reflected=True)
_b = tt.dvector("b")
_theta = tt.dvector("theta")
_bo = tt.dvector("bo")
_ro = tt.dscalar("ro")
_sigr = tt.dscalar("sigr")
self._s = theano.function(
[_b, _theta, _bo, _ro, _sigr],
self.map.ops.sT(_b, _theta, _bo, _ro, _sigr),
)
self._dsdb = [
theano.function(
[_b, _theta, _bo, _ro, _sigr],
tt.grad(
self.map.ops.sT(_b, _theta, _bo, _ro, _sigr)[0, n], _b
),
)
for n in range(self.map.Ny)
]
self._dsdtheta = [
theano.function(
[_b, _theta, _bo, _ro, _sigr],
tt.grad(
self.map.ops.sT(_b, _theta, _bo, _ro, _sigr)[0, n],
_theta,
),
)
for n in range(self.map.Ny)
]
self._dsdbo = [
theano.function(
[_b, _theta, _bo, _ro, _sigr],
tt.grad(
self.map.ops.sT(_b, _theta, _bo, _ro, _sigr)[0, n], _bo
),
)
for n in range(self.map.Ny)
]
self._dsdro = [
theano.function(
[_b, _theta, _bo, _ro, _sigr],
tt.grad(
self.map.ops.sT(_b, _theta, _bo, _ro, _sigr)[0, n], _ro
),
)
for n in range(self.map.Ny)
]
self._dsdsigr = [
theano.function(
[_b, _theta, _bo, _ro, _sigr],
tt.grad(
self.map.ops.sT(_b, _theta, _bo, _ro, _sigr)[0, n],
_sigr,
),
)
for n in range(self.map.Ny)
]
def s(self, b, theta, bo, ro, sigr, n=0):
if hasattr(ro, "__len__"):
assert not (
hasattr(b, "__len__")
or hasattr(theta, "__len__")
or hasattr(bo, "__len__")
or hasattr(sigr, "__len__")
)
return [
self._s([b], [theta], [bo], ro[i], sigr)[0, n]
for i in range(len(ro))
]
elif hasattr(sigr, "__len__"):
assert not (
hasattr(b, "__len__")
or hasattr(theta, "__len__")
or hasattr(bo, "__len__")
or hasattr(ro, "__len__")
)
return [
self._s([b], [theta], [bo], ro, sigr[i])[0, n]
for i in range(len(sigr))
]
else:
assert (
hasattr(b, "__len__")
or hasattr(theta, "__len__")
or hasattr(bo, "__len__")
)
shaper = (
np.zeros_like(b) + np.zeros_like(bo) + np.zeros_like(theta)
)
b += shaper
theta += shaper
bo += shaper
return self._s(b, theta, bo, ro, sigr)[:, n]
def dsdb(self, b, theta, bo, ro, sigr, n=0):
b = np.atleast_1d(b)
assert not hasattr(theta, "__len__")
assert not hasattr(bo, "__len__")
assert not hasattr(ro, "__len__")
assert not hasattr(sigr, "__len__")
return np.array(
[
self._dsdb[n]([b[i]], [theta], [bo], ro, sigr)
for i in range(len(b))
]
)
def dsdtheta(self, b, theta, bo, ro, sigr, n=0):
theta = np.atleast_1d(theta)
assert not hasattr(b, "__len__")
assert not hasattr(bo, "__len__")
assert not hasattr(ro, "__len__")
assert not hasattr(sigr, "__len__")
return np.array(
[
self._dsdtheta[n]([b], [theta[i]], [bo], ro, sigr)
for i in range(len(theta))
]
)
def dsdbo(self, b, theta, bo, ro, sigr, n=0):
bo = np.atleast_1d(bo)
assert not hasattr(b, "__len__")
assert not hasattr(theta, "__len__")
assert not hasattr(ro, "__len__")
assert not hasattr(sigr, "__len__")
return np.array(
[
self._dsdbo[n]([b], [theta], [bo[i]], ro, sigr)
for i in range(len(bo))
]
)
def dsdro(self, b, theta, bo, ro, sigr, n=0):
ro = np.atleast_1d(ro)
assert not hasattr(b, "__len__")
assert not hasattr(theta, "__len__")
assert not hasattr(bo, "__len__")
assert not hasattr(sigr, "__len__")
return np.array(
[
self._dsdro[n]([b], [theta], [bo], ro[i], sigr)
for i in range(len(ro))
]
)
def dsdsigr(self, b, theta, bo, ro, sigr, n=0):
sigr = np.atleast_1d(sigr)
assert not hasattr(b, "__len__")
assert not hasattr(theta, "__len__")
assert not hasattr(bo, "__len__")
assert not hasattr(ro, "__len__")
return np.array(
[
self._dsdsigr[n]([b], [theta], [bo], ro, sigr[i])
for i in range(len(sigr))
]
)
def grad(self, b, theta, bo, ro, sigr, n=0):
if hasattr(b, "__len__"):
wrt = b
elif hasattr(theta, "__len__"):
wrt = theta
elif hasattr(bo, "__len__"):
wrt = bo
elif hasattr(ro, "__len__"):
wrt = ro
elif hasattr(sigr, "__len__"):
wrt = sigr
else:
assert False
return np.gradient(
self.s(b, theta, bo, ro, sigr, n=n), edge_order=2
) / np.gradient(wrt, edge_order=2)
return Model()
def test_derivs(model, n=1, npts=10000, atol=1e-5, plot=False, throw=True):
if plot:
fig, ax = plt.subplots(3, 5, figsize=(16, 7))
# b gradient
theta = 0.51
bo = 0.75
ro = 0.1
sigr = 0.0
b = np.linspace(-1, 1, npts)
g1 = model.grad(b, theta, bo, ro, sigr, n=n)
g2 = model.dsdb(b, theta, bo, ro, sigr, n=n).flatten()
# Pad the edges (numerical gradient isn't great)
if throw:
assert np.allclose(g1[200:-50], g2[200:-50], atol=atol), "error in b"
if plot:
ax[0, 0].plot(b, model.s(b, theta, bo, ro, sigr, n=n))
ax[1, 0].plot(b, g1, lw=2)
ax[1, 0].plot(b, g2, lw=1)
ax[2, 0].plot(b, np.log10(np.abs(g1 - g2)), "k")
ax[2, 0].set_xlabel("b")
for axis in ax[:, 0]:
axis.set_xlim(-1, 1)
# theta gradient
b = 0.51
bo = 0.75
ro = 0.1
sigr = 30 * np.pi / 180
theta = np.linspace(-np.pi, np.pi, npts)
g1 = model.grad(b, theta, bo, ro, sigr, n=n)
g2 = model.dsdtheta(b, theta, bo, ro, sigr, n=n).flatten()
if throw:
assert np.allclose(g1, g2, atol=atol), "error in theta"
if plot:
ax[0, 1].plot(theta, model.s(b, theta, bo, ro, sigr, n=n))
ax[1, 1].plot(theta, g1, lw=2)
ax[1, 1].plot(theta, g2, lw=1)
ax[2, 1].plot(theta, np.log10(np.abs(g1 - g2)), "k")
ax[2, 1].set_xlabel("theta")
for axis in ax[:, 1]:
axis.set_xlim(-np.pi, np.pi)
# bo gradient
b = 0.51
theta = 0.49
ro = 0.1
sigr = 30 * np.pi / 180
bo = np.linspace(0, 1.5, npts)
g1 = model.grad(b, theta, bo, ro, sigr, n=n)
g2 = model.dsdbo(b, theta, bo, ro, sigr, n=n).flatten()
if throw:
assert np.allclose(g1, g2, atol=atol), "error in bo"
if plot:
ax[0, 2].plot(bo, model.s(b, theta, bo, ro, sigr, n=n))
ax[1, 2].plot(bo, g1, lw=2)
ax[1, 2].plot(bo, g2, lw=1)
ax[2, 2].plot(bo, np.log10(np.abs(g1 - g2)), "k")
ax[2, 2].set_xlabel("bo")
for axis in ax[:, 2]:
axis.set_xlim(0, 1.5)
# ro gradient
b = 0.51
theta = 0.49
bo = 0.75
sigr = 30 * np.pi / 180
ro = np.linspace(0.001, 1.5, npts)
g1 = model.grad(b, theta, bo, ro, sigr, n=n)
g2 = model.dsdro(b, theta, bo, ro, sigr, n=n).flatten()
if throw:
assert np.allclose(g1, g2, atol=atol), "error in ro"
if plot:
ax[0, 3].plot(ro, model.s(b, theta, bo, ro, sigr, n=n))
ax[1, 3].plot(ro, g1, lw=2)
ax[1, 3].plot(ro, g2, lw=1)
ax[1, 3].set_ylim(-1.5, 1.0)
ax[2, 3].plot(ro, np.log10(np.abs(g1 - g2)), "k")
ax[2, 3].set_xlabel("ro")
for axis in ax[:, 3]:
axis.set_xlim(0.001, 1.5)
for axis in ax[2, :]:
axis.set_ylim(-6, 0)
# sigr gradient
b = 0.51
theta = 0.49
bo = 0.75
ro = 0.1
sigr = np.linspace(0, 30 * np.pi / 180, npts)
g1 = model.grad(b, theta, bo, ro, sigr, n=n)
g2 = model.dsdsigr(b, theta, bo, ro, sigr, n=n).flatten()
if throw:
assert np.allclose(g1, g2, atol=atol), "error in sigr"
if plot:
ax[0, 4].plot(sigr, model.s(b, theta, bo, ro, sigr, n=n))
ax[1, 4].plot(sigr, g1, lw=2)
ax[1, 4].plot(sigr, g2, lw=1)
ax[1, 4].set_ylim(-1.5, 1.0)
ax[2, 4].plot(sigr, np.log10(np.abs(g1 - g2)), "k")
ax[2, 4].set_xlabel("sigr")
for axis in ax[:, 4]:
axis.set_xlim(0, 30 * np.pi / 180)
plt.show()
def test_abs_b_one():
"""Check derivs are finite when b=+/-1."""
b = tt.dscalar()
b.tag.test_value = -1.0
map = starry.Map(reflected=True)
def flux(b):
return map.flux(zs=-b, ys=0)
grad = theano.function([b], tt.grad(flux(b)[0], [b]))
assert not np.isnan(grad(-1.0)[0]) and not np.isnan(grad(1.0)[0])
if __name__ == "__main__":
test_abs_b_one()
|
1627783
|
from wpc import db, app, socketio
from wpc.flask_utils import url_for_other_page, url_change_args, nl2br, nl2br_py, get_or_create, is_safe_url
from wpc.models import MozillaStreamHack # NOQA
from wpc.models import YoutubeStream, WPCStream, Stream, Streamer, Subscriber, Idea, ChatMessage
from wpc.forms import SubscribeForm, GLMSubscribeForm, EditStreamerInfoForm, EditStreamTitleForm,\
SearchForm, IdeaForm, RtmpRedirectForm, DashboardEmailForm, DashboardAddVideoForm # NOQA
from flask import render_template, request, redirect, url_for, flash, jsonify, g, Response, session, abort
from flask.ext.login import login_user, logout_user, login_required, current_user
from flask.ext.socketio import emit, join_room
from jinja2 import Markup
from flask.views import View
from sqlalchemy import case
from utils import youtube_video_id
from uuid import uuid4
import praw
import random
from feedgen.feed import FeedGenerator
from datetime import datetime
import pytz
app.jinja_env.globals['url_for_other_page'] = url_for_other_page
app.jinja_env.globals['url_change_args'] = url_change_args
app.add_template_filter(nl2br)
@app.before_request
def add_ga_tracking_code():
g.ga_tracking_code = app.config['GA_TRACKING_CODE']
@app.before_request
def create_search_form():
g.search_form = SearchForm()
@app.before_request
def add_rtmp_secret():
if current_user.is_authenticated() and not current_user.rtmp_secret:
current_user.generate_rtmp_stuff()
@app.after_request
def set_subscribing_cookies(response):
subscribe_send_only_id = current_user.is_authenticated() and current_user.as_subscriber
response.set_cookie("subscribe_send_only_id", value="true" if subscribe_send_only_id else "false")
return response
def process_idea_form(idea_form):
if idea_form.submit_button.data and idea_form.validate_on_submit():
idea = Idea()
idea_form.populate_obj(idea)
db.session.add(idea)
db.session.commit()
flash("Your idea was added successfully!", "success")
return redirect(url_for("idea_list"))
@app.route('/streaming_guide')
def streaming_guide():
return render_template("streaming_guide.html")
@app.route('/', methods=['GET', 'POST'])
def index():
live_streams = Stream.query.filter_by(status='live').order_by(
case(
[
(Stream.streamer_id == None, None), # NOQA
(Stream.actual_start_time == None, None)
],
else_=Stream.actual_start_time
).desc().nullslast()
).all()
# Uncomment this when mozilla guys start livestreaming
# live_streams.insert(0, MozillaStreamHack())
idea_form = IdeaForm(prefix='idea')
redir = process_idea_form(idea_form)
if redir:
return redir
subscribe_form = SubscribeForm(prefix='subscribe')
if subscribe_form.submit_button.data and subscribe_form.validate_on_submit():
subscriber = Subscriber()
subscribe_form.populate_obj(subscriber)
db.session.add(subscriber)
db.session.commit()
flash("You've subscribed successfully!", "success")
return redirect(url_for('.index'))
random_stream = YoutubeStream.query.filter(YoutubeStream.status != 'upcoming').order_by(db.func.random()).first()
upcoming_streams = Stream.query.filter_by(status='upcoming').order_by(Stream.scheduled_start_time.asc()).all()
regular_streamer = Streamer.query.filter_by(reddit_username='gkbrk').one()
return render_template('index.html', subscribe_form=subscribe_form, idea_form=idea_form, live_streams=live_streams,
random_stream=random_stream,
upcoming_streams=upcoming_streams,
regular_streamer=regular_streamer)
@app.route('/idea_list', methods=['GET', 'POST'])
def idea_list():
ideas = Idea.query.order_by(Idea.id.desc()).all()
idea_form = IdeaForm(prefix='idea')
redir = process_idea_form(idea_form)
if redir:
return redir
return render_template("idea_list.html", ideas=ideas, idea_form=idea_form)
@app.route('/onlineconf', methods=['GET', 'POST'])
def onlineconf():
streams = YoutubeStream.query.filter_by(confstream=True).filter(
Stream.status == 'completed').order_by(
Stream.actual_start_time.desc().nullsfirst(),
Stream.id.desc()).all()
return render_template('onlineconf.html', streams=streams)
@app.route('/search', methods=['GET', 'POST'])
def search():
if g.search_form.validate_on_submit():
return redirect(url_for("past_streams", query=g.search_form.query.data.strip()))
else:
# Should never happen, unless user requested /search manually
return redirect(url_for("past_streams"))
@app.route('/past_streams', defaults={'page': 1, 'query': None})
@app.route('/past_streams/query/<query>', defaults={'page': 1})
@app.route('/past_streams/page/<int:page>', defaults={'query': None})
@app.route('/past_streams/query/<query>/page/<int:page>')
def past_streams(query, page):
streams = YoutubeStream.query.filter_by(status='completed')
if query:
terms = [t for t in query.split()]
streams = streams.filter(YoutubeStream.title.match(" & ".join(terms)))
sort_by = request.args.get('sort_by', 'views')
if sort_by == 'time':
streams = streams.order_by(YoutubeStream.actual_start_time.desc().nullslast())
else:
streams = streams.order_by(YoutubeStream.vod_views.desc().nullslast())
streams = streams.paginate(page, per_page=5)
return render_template('past_streams.html', streams=streams, page=page, query=query)
@app.route('/streamers/', defaults={'page': 1})
@app.route('/streamers/<int:page>')
def streamers_list(page):
streamers = Streamer.query.filter(Streamer.streams.any()).order_by(Streamer.reddit_username).paginate(page, per_page=50)
return render_template('streamers_list.html', streamers=streamers)
@app.route('/streamer/<streamer_name>/popout_chat', methods=["GET", "POST"])
def streamer_popout_chat(streamer_name):
streamer = Streamer.query.filter_by(reddit_username=streamer_name).first_or_404()
stream = WPCStream.query.filter_by(channel_name=streamer_name).first_or_404()
return render_template("streamer_popout_chat.html", streamer=streamer, stream=stream)
@app.route('/admin/streamer/<streamer_name>/rtmp_redirect/<int:redirect_id>')
def streamer_rtmp_redirect(streamer_name, redirect_id):
if request.remote_addr not in ['172.16.58.3', '127.0.0.1'] and request.headers.getlist("X-Forwarded-For")[-1] != '172.16.58.3':
return "", 403
if redirect_id not in [1, 2, 3]:
return "", 404
streamer = Streamer.query.filter_by(reddit_username=streamer_name).first_or_404()
redirect_url = getattr(streamer, 'rtmp_redirect_{}'.format(redirect_id))
if not redirect_url:
return "", 404
return redirect_url
# TODO: maybe make this a blueprint?
def force_test_login():
if request.remote_addr != '127.0.0.1':
abort(403)
test_account = Streamer.query.filter_by(reddit_username='if').first_or_404()
login_user(test_account)
return redirect(url_for('index'))
def add_force_test_login(app):
app.add_url_rule('/force_test_login', 'force_test_login', force_test_login)
class StreamerPage(View):
methods = ['GET', 'POST']
def dispatch_request(self, streamer_name, page):
streamer = Streamer.query.filter_by(reddit_username=streamer_name).first_or_404()
wpc_stream = streamer.streams.filter_by(type='wpc_stream').first()
# glm_talkshow stuff
if streamer_name == 'glm_talkshow':
subscribe_form = GLMSubscribeForm(prefix='streamer_subscribe')
if subscribe_form.validate_on_submit():
subscriber = get_or_create(Subscriber, email=subscribe_form.email.data)
if subscriber not in streamer.subscribers:
streamer.subscribers.append(subscriber)
flash("Subscribed successfully!", category='success')
else:
flash("You're already subscribed!")
db.session.commit()
yt_recording_ep1 = YoutubeStream.query.filter_by(ytid='f968E8eZmvM').one()
yt_recording_ep2 = YoutubeStream.query.filter_by(ytid='87SfA1sw7vY').one()
yt_recording_ep3 = YoutubeStream.query.filter_by(ytid='R7z2GQr9-tg').one()
yt_recording_ep4 = YoutubeStream.query.filter_by(ytid='zU7ltY9Dmnk').one()
yt_recording_ep5 = YoutubeStream.query.filter_by(ytid='3A_oTuzGoeE').one()
how_to_learn_programming = YoutubeStream.query.filter_by(ytid='6XtSPvjt87w').one()
return render_template('streamers/glm_talkshow.html',
streamer=streamer,
wpc_stream=wpc_stream,
yt_stream_ep1=yt_recording_ep1,
yt_stream_ep2=yt_recording_ep2,
yt_stream_ep3=yt_recording_ep3,
yt_stream_ep4=yt_recording_ep4,
yt_stream_ep5=yt_recording_ep5,
how_to_learn_programming=how_to_learn_programming,
subscribe_form=subscribe_form)
# all stuff
streams = streamer.streams
if wpc_stream:
streams = streams.filter(Stream.id != wpc_stream.id)
streams = streams.order_by(Stream.actual_start_time.desc().nullslast()).paginate(page, per_page=5)
check_profile_alert = False
info_form = EditStreamerInfoForm(prefix='info')
title_form = EditStreamTitleForm(prefix='title')
if current_user.is_authenticated() and current_user == streamer:
if request.method == 'POST':
if info_form.submit_button.data:
if info_form.validate_on_submit():
current_user.populate(info_form)
db.session.commit()
flash("Updated successfully", category='success')
return redirect(url_for('.streamer_page', streamer_name=streamer_name, page=page))
elif title_form.submit_button.data:
if title_form.validate_on_submit():
wpc_stream.title = title_form.title.data
db.session.commit()
return jsonify(newTitle=Markup.escape(title_form.title.data))
else:
if not streamer.checked:
streamer.checked = True
db.session.commit()
if (streamer.youtube_channel or streamer.twitch_channel):
check_profile_alert = True
info_form.youtube_channel.data = current_user.youtube_channel
info_form.twitch_channel.data = current_user.twitch_channel
info_form.info.data = current_user.info
if wpc_stream:
title_form.title.data = wpc_stream.title
return render_template('streamer.html', streamer=streamer,
streams=streams, info_form=info_form,
title_form=title_form, wpc_stream=wpc_stream,
check_profile_alert=check_profile_alert)
streamer_page = StreamerPage.as_view('streamer_page')
app.add_url_rule('/streamer/<streamer_name>', defaults={'page': 1},
view_func=streamer_page)
app.add_url_rule('/streamer/<streamer_name>/<int:page>',
view_func=streamer_page)
@app.route('/dashboard/<tab>', methods=['POST', 'GET'])
@app.route('/dashboard', defaults={"tab": "streaming"}, methods=['POST', 'GET'])
@login_required
def dashboard(tab):
rtmp_redirect_form = RtmpRedirectForm(prefix='rtmpform')
email_form = DashboardEmailForm(prefix='emailform')
add_video_form = DashboardAddVideoForm(prefix='addvideoform')
if request.method == "GET":
rtmp_redirect_form.prepopulate(current_user)
email_form.prepopulate(current_user)
if rtmp_redirect_form.validate_on_submit():
rtmp_redirect_form.populate_obj(current_user)
db.session.commit()
flash('Successfully updated your RTMP redirects!', 'success')
return redirect(url_for("dashboard", tab="streaming"))
if email_form.validate_on_submit():
current_user.populate_email(email_form.email.data)
db.session.commit()
flash('Successfully updated your email address!', 'success')
return redirect(url_for("dashboard", tab="email"))
if add_video_form.validate_on_submit():
ytid = youtube_video_id(add_video_form.link.data)
ys = get_or_create(YoutubeStream, ytid=ytid)
ys.streamer = current_user
for i in xrange(10):
try:
ys._update_status()
db.session.commit()
flash(u'Successfully added YouTube video "{}"'.format(Markup.escape(ys.title)), 'success')
break
except Exception as e:
app.logger.error("Failed to add YouTube video {}".format(ys))
app.logger.exception(e)
else:
flash("Failed to add YouTube video! Try again?", 'error')
app.logger.error("Failed to add YouTube video multiple times {}".format(ys))
return redirect(url_for("dashboard", tab="video-archive"))
return render_template("dashboard.html",
rtmp_redirect_form=rtmp_redirect_form,
email_form=email_form,
add_video_form=add_video_form,
tab=tab)
@app.route('/_subscriptions', methods=["PUT", "DELETE"])
def _subscribe_to_streamer():
if ('email' not in request.form and not current_user.is_authenticated()) or\
'streamer_id' not in request.form:
abort(400)
streamer_id = request.form['streamer_id']
if current_user.is_anonymous() or not current_user.as_subscriber:
email = request.form['email']
else:
email = current_user.as_subscriber.email
streamer = Streamer.query.get_or_404(streamer_id)
subscriber = get_or_create(Subscriber, email=email)
if current_user.is_authenticated():
current_user.as_subscriber = subscriber
if request.method == "PUT":
if subscriber not in streamer.subscribers:
streamer.subscribers.append(subscriber)
else:
if subscriber in streamer.subscribers:
streamer.subscribers.remove(subscriber)
db.session.commit()
response = app.make_response(jsonify(result="OK"))
response.set_cookie("email", value=email)
return response
@app.route('/reddit_authorize_callback')
def reddit_authorize_callback():
r = praw.Reddit(user_agent=app.config["REDDIT_WEB_APP_USER_AGENT"])
r.set_oauth_app_info(app.config['REDDIT_API_ID'], app.config['REDDIT_API_SECRET'], url_for('.reddit_authorize_callback', _external=True))
name = None
code = request.args.get('code', '')
if code:
r.get_access_information(code)
name = r.get_me().name
if name:
user = get_or_create(Streamer, reddit_username=name)
db.session.commit()
login_user(user, remember=True)
flash("Logged in successfully!", 'success')
if not name:
flash("An error occurred while trying to log in.", 'error')
next_url = session.pop('next_url_after_login', url_for("streamer_page", streamer_name=name))
return redirect(next_url)
@app.route('/auth')
def authorize():
if is_safe_url(request.referrer):
session['next_url_after_login'] = request.referrer
r = praw.Reddit(user_agent=app.config["REDDIT_WEB_APP_USER_AGENT"])
r.set_oauth_app_info(app.config['REDDIT_API_ID'], app.config['REDDIT_API_SECRET'], url_for('.reddit_authorize_callback', _external=True))
session['unique_key'] = uuid4()
url = r.get_authorize_url(session['unique_key'], 'identity')
return redirect(url)
def authenticate_streamer():
streamer_username = request.values.get('name', '')
rtmp_secret = request.values.get('pass', '')
streamer = Streamer.query.filter_by(reddit_username=streamer_username).first()
if not streamer or not streamer.rtmp_secret or streamer.rtmp_secret != rtmp_secret:
app.logger.info(u"Fail to check credentials for streamer {}".format(streamer_username))
return None, None
return get_or_create(WPCStream, channel_name=streamer_username), streamer
@app.route('/rtmp_auth', methods=['POST'])
def rtmp_auth():
stream, streamer = authenticate_streamer()
if stream is None:
abort(403)
app.logger.info(u"{} went live".format(stream))
stream.streamer = streamer
# test stream
if streamer.test:
db.session.commit()
return "OK"
stream.actual_start_time = datetime.utcnow()
stream._go_live()
db.session.commit()
return "OK"
@app.route('/rtmp_done', methods=['POST'])
def rtmp_done():
stream, streamer = authenticate_streamer()
if stream is not None:
app.logger.info(u"{} done".format(stream))
stream.status = 'completed'
stream.actual_start_time = None
stream.current_viewers = None
db.session.commit()
return "OK"
@app.route('/_regenerate_rtmp_key')
def regenerate_rtmp_key():
if not current_user.is_authenticated():
abort(403)
current_user.generate_rtmp_stuff()
return jsonify(rtmp_key=current_user.streaming_key())
@app.route("/logout")
@login_required
def logout():
logout_user()
flash("Logged out successfully!", 'info')
return redirect(url_for(".index"))
@app.route("/podcast_feed.xml")
def podcast_feed():
logo_url = url_for("static", filename="wpclogo_big.png", _external=True)
fg = FeedGenerator()
fg.load_extension('podcast')
fg.podcast.itunes_category('Technology', 'Podcasting')
fg.podcast.itunes_image(logo_url)
fg.author({'name': '<NAME>', 'email': '<EMAIL>'})
fg.link(href='http://watchpeoplecode.com/podcast_feed.xml', rel='self')
fg.title('WPC Coders Podcast')
fg.description('WPC Coders Podcast is a weekly peek into the lives of developers and the WatchPeopleCode community. Our goal is to keep our listeners entertained by giving them new and interesting insights into our industry as well as awesome things happening within our own community. Here, you can expect to hear about some of the latest news, tools, and opportunities for developers in nearly every area of our industry. Most importantly, we hope to have some fun and a few laughs in ways only other nerds know how.') # NOQA
episodes = [('ep1.mp3', 'Episode 1', datetime(2015, 02, 21, 23), 'Learn all about the WPC hosts, and where we came from in Episode 1!'),
('ep2.mp3', 'Episode 2', datetime(2015, 02, 28, 23), 'This week we cover your news, topics and questions in episode 2!'),
('ep3.mp3', 'Episode 3', datetime(2015, 03, 07, 23), "On today's podcast we talk to WatchPeopleCode's founder <NAME>. Hear about the reddit search engine that thousands watched him write. Also, hear the inside scoop of how WatchPeopleCode got started!"), # NOQA
('ep4.mp3', 'Episode 4', datetime(2015, 03, 14, 23), "This week we talk to FreeCodeCamp's <NAME> (http://www.freecodecamp.com) about their project that combines teaching new developers how to code and completing projects for non-profits! Let's find out how this group of streamers codes with a cause!")] # NOQA
for epfile, eptitle, epdate, epdescription in episodes[::-1]:
epurl = "https://s3.amazonaws.com/wpcpodcast/{}".format(epfile)
fe = fg.add_entry()
fe.id(epurl)
fe.title(eptitle)
fe.description(epdescription)
fe.podcast.itunes_image(logo_url)
fe.pubdate(epdate.replace(tzinfo=pytz.UTC))
fe.enclosure(epurl, 0, 'audio/mpeg')
return Response(response=fg.rss_str(pretty=True),
status=200,
mimetype='application/rss+xml')
chat_users = set()
@socketio.on('connect', namespace='/chat')
def chat_connect():
print('New connection')
return True
@socketio.on('initialize', namespace='/chat')
def chat_initialize():
first_words = ['True', 'False', 'For', 'While', 'If', 'Else', 'Elif', 'Undefined', 'Do', 'Virtual', 'Inline',
'Exit', 'Continue', 'Super', 'Break', 'Switch', 'Try', 'Catch', 'Class', 'Object', 'Abstract', 'Interface',
'Def', 'Var', 'Pass', 'Return', 'Static', 'Const', 'Template', 'Delete', 'Int',
'Float', 'Struct', 'Void', 'Self', 'This']
second_words = ['C', 'C++', 'Lisp', 'Python', 'Java', 'JavaScript', 'Pascal', 'Objective-C',
'C#', 'Perl', 'Ruby', 'Ada', 'Haskell', 'Octave', 'Basic', 'Fortran', 'PHP', 'R',
'Assembly', 'COBOL', 'Rust', 'Swift', 'Bash', 'Brainfuck', 'OCaml', 'Clojure']
if current_user.is_authenticated():
session['username'] = current_user.reddit_username
elif 'username' not in session or session['username'] in chat_users:
while True:
session['username'] = random.choice(first_words) + ' ' + random.choice(second_words)
if session['username'] not in chat_users:
chat_users.add(session['username'])
break
db.session.close()
def check_chat_access_and_get_streamer(streamer_username=None):
if 'username' not in session or streamer_username is None:
return None
streamer = Streamer.query.filter_by(reddit_username=streamer_username.strip()).first()
return streamer
@socketio.on('join', namespace='/chat')
def join(streamer_username):
streamer = check_chat_access_and_get_streamer(streamer_username)
if streamer is None:
request.namespace.disconnect()
else:
join_room(streamer.reddit_username)
if current_user.is_authenticated():
emit('join', False, session['username']) # Sending the username before actual join.
old_messages = []
for msg in reversed(ChatMessage.query.filter_by(streamer=streamer).order_by(ChatMessage.id.desc()).limit(20).all()):
if msg.text == "/clear":
old_messages = []
else:
old_messages.append(msg)
emit('last_messages',
[{"sender": msg.sender,
"text": nl2br_py(msg.text)}
for msg in old_messages])
emit('join', True, session['username'])
db.session.close()
@socketio.on('disconnect', namespace='/chat')
def chat_disconnect():
if 'username' in session and not current_user.is_authenticated():
chat_users.remove(session['username'])
@socketio.on('message', namespace='/chat')
def chat_message(message_text, streamer_username):
streamer = check_chat_access_and_get_streamer(streamer_username)
if streamer is None:
return
if len(message_text) > 2048:
message_text = u"{}... <message is too big>".format(message_text[:2048])
message = {"sender": session['username'],
"text": nl2br_py(message_text)}
if current_user.is_anonymous() and\
streamer.streams.filter_by(type='wpc_stream').one().chat_anon_forbidden:
emit("forbidden")
elif current_user.is_authenticated() and current_user.is_banned:
emit("message", message)
else:
if message_text.startswith("/clear"):
if current_user.is_authenticated() and current_user.reddit_username == streamer.reddit_username:
emit("clear", room=streamer.reddit_username) # Clear for all viewers
clear_message = ChatMessage(streamer=streamer, text="/clear", sender=session["username"])
db.session.add(clear_message)
db.session.commit()
else:
emit("clear") # Clear for one user
else:
# Normal chat message
cm = ChatMessage(streamer=streamer, text=message_text, sender=session['username'])
db.session.add(cm)
db.session.commit()
emit("message", message, room=streamer.reddit_username)
db.session.close()
return True
@socketio.on_error_default
def default_error_handler(e):
app.logger.error(e)
|
1627793
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from rest_framework import permissions
from api.models import College
from api.serializers import CollegeSerializer
COLLEGE_URL = reverse('college-list')
def create_user(**params):
"""Helper function to create new user"""
return get_user_model().objects.create_user(**params)
class PublicCollegeApiTests(TestCase):
"""Test unauthenticated college get API access"""
def setUp(self):
self.client = APIClient()
def test_required_auth(self):
"""Test the authenticaiton is required"""
res = self.client.get(COLLEGE_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateCollegeApiTests(TestCase):
"""Test the authorized user college API"""
def setUp(self):
email = '<EMAIL>'
password = '<PASSWORD>'
name = 'unique_name'
self.user = get_user_model().objects.create_user(
email=email,
password=password,
name=name
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Test retrieving college objects"""
College.objects.create(college_name="user4",
college_address="user4",
grade="user4",
degree="user4",
from_date="2016-04-20T19:49:25.541768Z",
to_date="2020-04-20T19:49:25.541799Z",
user_profile=self.user
)
res = self.client.get(COLLEGE_URL)
college = College.objects.all().order_by('-college_name')
serializer = CollegeSerializer(college, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data['data'], serializer.data)
|
1627817
|
from collections import defaultdict
import copy
# 3p
import mock
# project
from tests.checks.common import AgentCheckTest
MOCK_DATA = """# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,
a,FRONTEND,,,1,2,12,1,11,11,0,0,0,,,,,OPEN,,,,,,,,,1,1,0,,,,0,1,0,2,,,,0,1,0,0,0,0,,1,1,1,,,
a,BACKEND,0,0,0,0,12,0,11,11,0,0,,0,0,0,0,UP,0,0,0,,0,1221810,0,,1,1,0,,0,,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,
b,FRONTEND,,,1,2,12,11,11,0,0,0,0,,,,,OPEN,,,,,,,,,1,2,0,,,,0,0,0,1,,,,,,,,,,,0,0,0,,,
b,i-1,0,0,0,1,,1,1,0,,0,,0,0,0,0,UP 1/2,1,1,0,0,1,1,30,,1,3,1,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-2,0,0,1,1,,1,1,0,,0,,0,0,0,0,UP 1/2,1,1,0,0,0,1,0,,1,3,2,,71,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-3,0,0,0,1,,1,1,0,,0,,0,0,0,0,UP,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-4,0,0,0,1,,1,1,0,,0,,0,0,0,0,DOWN,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-5,0,0,0,1,,1,1,0,,0,,0,0,0,0,MAINT,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,BACKEND,0,0,1,2,0,421,1,0,0,0,,0,0,0,0,UP,6,6,0,,0,1,0,,1,3,0,,421,,1,0,,1,,,,,,,,,,,,,,0,0,
c,i-1,0,0,0,1,,1,1,0,,0,,0,0,0,0,UP,1,1,0,0,1,1,30,,1,3,1,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
c,i-2,0,0,0,1,,1,1,0,,0,,0,0,0,0,DOWN (agent),1,1,0,0,1,1,30,,1,3,1,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
c,i-3,0,0,0,1,,1,1,0,,0,,0,0,0,0,NO CHECK,1,1,0,0,1,1,30,,1,3,1,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
c,BACKEND,0,0,1,2,0,421,1,0,0,0,,0,0,0,0,UP,6,6,0,,0,1,0,,1,3,0,,421,,1,0,,1,,,,,,,,,,,,,,0,0,
"""
AGG_STATUSES_BY_SERVICE = (
(['status:available', 'service:a'], 1),
(['status:available', 'service:b'], 4),
(['status:unavailable', 'service:b'], 2),
(['status:available', 'service:c'], 1),
(['status:unavailable', 'service:c'], 2)
)
AGG_STATUSES = (
(['status:available'], 6),
(['status:unavailable'], 4)
)
class TestCheckHAProxy(AgentCheckTest):
CHECK_NAME = 'haproxy'
BASE_CONFIG = {
'init_config': None,
'instances': [
{
'url': 'http://localhost/admin?stats',
'collect_status_metrics': True,
}
]
}
def _assert_agg_statuses(self, count_status_by_service=True, collate_status_tags_per_host=False):
expected_statuses = AGG_STATUSES_BY_SERVICE if count_status_by_service else AGG_STATUSES
for tags, value in expected_statuses:
if collate_status_tags_per_host:
# Assert that no aggregate statuses are sent
self.assertMetric('haproxy.count_per_status', tags=tags, count=0)
else:
self.assertMetric('haproxy.count_per_status', value=value, tags=tags)
@mock.patch('requests.get', return_value=mock.Mock(content=MOCK_DATA))
def test_count_per_status_agg_only(self, mock_requests):
config = copy.deepcopy(self.BASE_CONFIG)
# with count_status_by_service set to False
config['instances'][0]['count_status_by_service'] = False
self.run_check(config)
self.assertMetric('haproxy.count_per_status', value=2, tags=['status:open'])
self.assertMetric('haproxy.count_per_status', value=4, tags=['status:up'])
self.assertMetric('haproxy.count_per_status', value=2, tags=['status:down'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:maint'])
self.assertMetric('haproxy.count_per_status', value=0, tags=['status:nolb'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:no_check'])
self._assert_agg_statuses(count_status_by_service=False)
@mock.patch('requests.get', return_value=mock.Mock(content=MOCK_DATA))
def test_count_per_status_by_service(self, mock_requests):
self.run_check(self.BASE_CONFIG)
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:open', 'service:a'])
self.assertMetric('haproxy.count_per_status', value=3, tags=['status:up', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:open', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:down', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:maint', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:up', 'service:c'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:down', 'service:c'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:no_check', 'service:c'])
self._assert_agg_statuses()
@mock.patch('requests.get', return_value=mock.Mock(content=MOCK_DATA))
def test_count_per_status_by_service_and_host(self, mock_requests):
config = copy.deepcopy(self.BASE_CONFIG)
config['instances'][0]['collect_status_metrics_by_host'] = True
self.run_check(config)
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:FRONTEND', 'status:open', 'service:a'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:FRONTEND', 'status:open', 'service:b'])
for backend in ['i-1', 'i-2', 'i-3']:
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:%s' % backend, 'status:up', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-4', 'status:down', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-5', 'status:maint', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-1', 'status:up', 'service:c'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-2', 'status:down', 'service:c'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-3', 'status:no_check', 'service:c'])
self._assert_agg_statuses()
@mock.patch('requests.get', return_value=mock.Mock(content=MOCK_DATA))
def test_count_per_status_by_service_and_collate_per_host(self, mock_requests):
config = copy.deepcopy(self.BASE_CONFIG)
config['instances'][0]['collect_status_metrics_by_host'] = True
config['instances'][0]['collate_status_tags_per_host'] = True
self.run_check(config)
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:FRONTEND', 'status:available', 'service:a'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:FRONTEND', 'status:available', 'service:b'])
for backend in ['i-1', 'i-2', 'i-3']:
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:%s' % backend, 'status:available', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-4', 'status:unavailable', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-5', 'status:unavailable', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-1', 'status:available', 'service:c'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-2', 'status:unavailable', 'service:c'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-3', 'status:unavailable', 'service:c'])
self._assert_agg_statuses(collate_status_tags_per_host=True)
@mock.patch('requests.get', return_value=mock.Mock(content=MOCK_DATA))
def test_count_per_status_collate_per_host(self, mock_requests):
config = copy.deepcopy(self.BASE_CONFIG)
config['instances'][0]['collect_status_metrics_by_host'] = True
config['instances'][0]['collate_status_tags_per_host'] = True
config['instances'][0]['count_status_by_service'] = False
self.run_check(config)
self.assertMetric('haproxy.count_per_status', value=2, tags=['backend:FRONTEND', 'status:available'])
self.assertMetric('haproxy.count_per_status', value=2, tags=['backend:i-1', 'status:available'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-2', 'status:available'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-2', 'status:unavailable'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-3', 'status:available'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-3', 'status:unavailable'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-4', 'status:unavailable'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-5', 'status:unavailable'])
self._assert_agg_statuses(count_status_by_service=False, collate_status_tags_per_host=True)
# This mock is only useful to make the first `run_check` run w/o errors (which in turn is useful only to initialize the check)
@mock.patch('requests.get', return_value=mock.Mock(content=MOCK_DATA))
def test_count_hosts_statuses(self, mock_requests):
self.run_check(self.BASE_CONFIG)
data = """# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,
a,FRONTEND,,,1,2,12,1,11,11,0,0,0,,,,,OPEN,,,,,,,,,1,1,0,,,,0,1,0,2,,,,0,1,0,0,0,0,,1,1,1,,,
a,BACKEND,0,0,0,0,12,0,11,11,0,0,,0,0,0,0,UP,0,0,0,,0,1221810,0,,1,1,0,,0,,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,
b,FRONTEND,,,1,2,12,11,11,0,0,0,0,,,,,OPEN,,,,,,,,,1,2,0,,,,0,0,0,1,,,,,,,,,,,0,0,0,,,
b,i-1,0,0,0,1,,1,1,0,,0,,0,0,0,0,UP 1/2,1,1,0,0,1,1,30,,1,3,1,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-2,0,0,1,1,,1,1,0,,0,,0,0,0,0,UP 1/2,1,1,0,0,0,1,0,,1,3,2,,71,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-3,0,0,0,1,,1,1,0,,0,,0,0,0,0,UP,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-4,0,0,0,1,,1,1,0,,0,,0,0,0,0,DOWN,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-5,0,0,0,1,,1,1,0,,0,,0,0,0,0,MAINT,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,BACKEND,0,0,1,2,0,421,1,0,0,0,,0,0,0,0,UP,6,6,0,,0,1,0,,1,3,0,,421,,1,0,,1,,,,,,,,,,,,,,0,0,
""".split('\n')
# per service
self.check._process_data(data, True, False, collect_status_metrics=True,
collect_status_metrics_by_host=False)
expected_hosts_statuses = defaultdict(int)
expected_hosts_statuses[('b', 'open')] = 1
expected_hosts_statuses[('b', 'up')] = 3
expected_hosts_statuses[('b', 'down')] = 1
expected_hosts_statuses[('b', 'maint')] = 1
expected_hosts_statuses[('a', 'open')] = 1
self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)
# backend hosts
agg_statuses = self.check._process_backend_hosts_metric(expected_hosts_statuses)
expected_agg_statuses = {
'a': {'available': 0, 'unavailable': 0},
'b': {'available': 3, 'unavailable': 2},
}
self.assertEquals(expected_agg_statuses, dict(agg_statuses))
# with process_events set to True
self.check._process_data(data, True, True, collect_status_metrics=True,
collect_status_metrics_by_host=False)
self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)
# per host
self.check._process_data(data, True, False, collect_status_metrics=True,
collect_status_metrics_by_host=True)
expected_hosts_statuses = defaultdict(int)
expected_hosts_statuses[('b', 'FRONTEND', 'open')] = 1
expected_hosts_statuses[('a', 'FRONTEND', 'open')] = 1
expected_hosts_statuses[('b', 'i-1', 'up')] = 1
expected_hosts_statuses[('b', 'i-2', 'up')] = 1
expected_hosts_statuses[('b', 'i-3', 'up')] = 1
expected_hosts_statuses[('b', 'i-4', 'down')] = 1
expected_hosts_statuses[('b', 'i-5', 'maint')] = 1
self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)
self.check._process_data(data, True, True, collect_status_metrics=True,
collect_status_metrics_by_host=True)
self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)
|
1627825
|
from smtplib import SMTP as smtp
import json
def sendmail(sender_add, receiver_add, msg, password):
    """Send a plain-text message from sender_add to receiver_add via Gmail's SMTP server."""
    server = smtp('smtp.gmail.com:587')
    server.starttls()
    server.login(sender_add, password)
    server.sendmail(sender_add, receiver_add, msg)
    server.quit()
    print("Mail sent successfully....!")
group = {}
print('\t\t ......LOGIN.....')
your_add = input('Enter your email address :')
password = input('Enter your email password for login:')
print('\n\n\n\n')
choice = 'y'
while choice not in ('3', 'no'):
    print("\n1. Create a group\n2. Message a group\n3. Exit")
choice = input()
if choice == '1':
ch = 'y'
while(ch != 'n'):
gname = input('Enter name of group :')
group[gname] = input('Enter contact emails separated by a single space :').rstrip()
ch = input('Add another....y/n? :').rstrip()
        # Merge with any previously saved groups; appending a second JSON object would corrupt groups.json
        try:
            with open('groups.json', 'r') as f:
                group = {**json.load(f), **group}
        except (OSError, ValueError):
            pass
        with open('groups.json', 'w') as f:
            json.dump(group, f)
elif choice == '2':
gname = input('Enter name of group :')
        try:
            with open('groups.json', 'r') as f:
                members = json.load(f)[gname].split()
        except (OSError, ValueError, KeyError):
            print('Invalid group name. Please create the group first')
            continue
msg = input('Enter message :')
for i in members:
try:
sendmail(your_add, i, msg, password)
except:
print("An unexpected error occured. Please try again later...")
continue
else:
break
|
1627856
|
import os, sys, json
from wptserve.utils import isomorphic_decode, isomorphic_encode
import importlib
util = importlib.import_module("common.security-features.scope.util")
def main(request, response):
policyDeliveries = json.loads(request.GET.first(b"policyDeliveries", b"[]"))
maybe_additional_headers = {}
meta = u''
error = u''
for delivery in policyDeliveries:
if delivery[u'deliveryType'] == u'meta':
if delivery[u'key'] == u'referrerPolicy':
meta += u'<meta name="referrer" content="%s">' % delivery[u'value']
else:
error = u'invalid delivery key'
elif delivery[u'deliveryType'] == u'http-rp':
if delivery[u'key'] == u'referrerPolicy':
maybe_additional_headers[b'Referrer-Policy'] = isomorphic_encode(delivery[u'value'])
else:
error = u'invalid delivery key'
else:
error = u'invalid deliveryType'
handler = lambda: util.get_template(u"document.html.template") % ({
u"meta": meta,
u"error": error
})
util.respond(
request,
response,
payload_generator=handler,
content_type=b"text/html",
maybe_additional_headers=maybe_additional_headers)
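# Illustrative request (a sketch, not part of the original test helper): a query such as
#   ?policyDeliveries=[{"deliveryType": "meta", "key": "referrerPolicy", "value": "no-referrer"}]
# makes this handler inject <meta name="referrer" content="no-referrer"> into the generated
# document, whereas "deliveryType": "http-rp" sets a Referrer-Policy response header instead.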
|
1627875
|
import pytest
from diofant import (I, O, Rational, Symbol, atanh, conjugate, elliptic_e,
elliptic_f, elliptic_k, elliptic_pi, gamma, hyper,
meijerg, oo, pi, sin, sqrt, tan, zoo)
from diofant.abc import m, n, z
from diofant.core.function import ArgumentIndexError
from diofant.utilities.randtest import random_complex_number as randcplx
from diofant.utilities.randtest import verify_derivative_numerically as td
from diofant.utilities.randtest import verify_numerically as tn
__all__ = ()
i = Symbol('i', integer=True)
j = Symbol('k', integer=True, positive=True)
def test_elliptic_k():
assert elliptic_k(0) == pi/2
assert elliptic_k(Rational(1, 2)) == 8*pi**Rational(3, 2)/gamma(-Rational(1, 4))**2
assert elliptic_k(1) == zoo
assert elliptic_k(-1) == gamma(Rational(1, 4))**2/(4*sqrt(2*pi))
assert elliptic_k(oo) == 0
assert elliptic_k(-oo) == 0
assert elliptic_k(I*oo) == 0
assert elliptic_k(-I*oo) == 0
assert elliptic_k(zoo) == 0
assert elliptic_k(z).diff(z) == (elliptic_e(z) - (1 - z)*elliptic_k(z))/(2*z*(1 - z))
assert td(elliptic_k(z), z)
pytest.raises(ArgumentIndexError, lambda: elliptic_k(z).fdiff(2))
zi = Symbol('z', extended_real=False)
assert elliptic_k(zi).conjugate() == elliptic_k(zi.conjugate())
zr = Symbol('z', extended_real=True, negative=True)
assert elliptic_k(zr).conjugate() == elliptic_k(zr)
assert elliptic_k(z).conjugate() == conjugate(elliptic_k(z), evaluate=False)
assert elliptic_k(z).rewrite(hyper) == \
(pi/2)*hyper((Rational(1, 2), Rational(1, 2)), (1,), z)
assert tn(elliptic_k(z), (pi/2)*hyper((Rational(1, 2), Rational(1, 2)), (1,), z))
assert elliptic_k(z).rewrite(meijerg) == \
meijerg(((Rational(1, 2), Rational(1, 2)), []), ((0,), (0,)), -z)/2
assert tn(elliptic_k(z), meijerg(((Rational(1, 2), Rational(1, 2)), []), ((0,), (0,)), -z)/2)
assert elliptic_k(z).series(z) == pi/2 + pi*z/8 + 9*pi*z**2/128 + \
25*pi*z**3/512 + 1225*pi*z**4/32768 + 3969*pi*z**5/131072 + O(z**6)
def test_elliptic_f():
assert elliptic_f(z, 0) == z
assert elliptic_f(0, m) == 0
assert elliptic_f(pi*i/2, m) == i*elliptic_k(m)
assert elliptic_f(z, oo) == 0
assert elliptic_f(z, -oo) == 0
assert elliptic_f(-z, m) == -elliptic_f(z, m)
assert elliptic_f(z, m).diff(z) == 1/sqrt(1 - m*sin(z)**2)
assert elliptic_f(z, m).diff(m) == elliptic_e(z, m)/(2*m*(1 - m)) - elliptic_f(z, m)/(2*m) - \
sin(2*z)/(4*(1 - m)*sqrt(1 - m*sin(z)**2))
r = randcplx()
assert td(elliptic_f(z, r), z)
assert td(elliptic_f(r, m), m)
pytest.raises(ArgumentIndexError, lambda: elliptic_f(z, m).fdiff(3))
mi = Symbol('m', extended_real=False)
assert elliptic_f(z, mi).conjugate() == elliptic_f(z.conjugate(), mi.conjugate())
mr = Symbol('m', extended_real=True, negative=True)
assert elliptic_f(z, mr).conjugate() == elliptic_f(z.conjugate(), mr)
assert elliptic_f(z, m).conjugate() == conjugate(elliptic_f(z, m), evaluate=False)
assert elliptic_f(z, m).series(z) == \
z + z**5*(3*m**2/40 - m/30) + m*z**3/6 + O(z**6)
def test_elliptic_e():
assert elliptic_e(z, 0) == z
assert elliptic_e(0, m) == 0
assert elliptic_e(i*pi/2, m) == i*elliptic_e(m)
assert elliptic_e(z, oo) == zoo
assert elliptic_e(z, -oo) == zoo
assert elliptic_e(0) == pi/2
assert elliptic_e(1) == 1
assert elliptic_e(oo) == I*oo
assert elliptic_e(-oo) == oo
assert elliptic_e(zoo) == zoo
assert elliptic_e(-z, m) == -elliptic_e(z, m)
assert elliptic_e(z, m).diff(z) == sqrt(1 - m*sin(z)**2)
assert elliptic_e(z, m).diff(m) == (elliptic_e(z, m) - elliptic_f(z, m))/(2*m)
assert elliptic_e(z).diff(z) == (elliptic_e(z) - elliptic_k(z))/(2*z)
r = randcplx()
assert td(elliptic_e(r, m), m)
assert td(elliptic_e(z, r), z)
assert td(elliptic_e(z), z)
pytest.raises(ArgumentIndexError, lambda: elliptic_e(z, m).fdiff(3))
pytest.raises(ArgumentIndexError, lambda: elliptic_e(z).fdiff(2))
mi = Symbol('m', extended_real=False)
assert elliptic_e(z, mi).conjugate() == elliptic_e(z.conjugate(), mi.conjugate())
assert elliptic_e(mi).conjugate() == elliptic_e(mi.conjugate())
mr = Symbol('m', extended_real=True, negative=True)
assert elliptic_e(z, mr).conjugate() == elliptic_e(z.conjugate(), mr)
assert elliptic_e(mr).conjugate() == elliptic_e(mr)
assert elliptic_e(z, m).conjugate() == conjugate(elliptic_e(z, m))
assert elliptic_e(z).conjugate() == conjugate(elliptic_e(z))
assert elliptic_e(z).rewrite(hyper) == (pi/2)*hyper((Rational(-1, 2), Rational(1, 2)), (1,), z)
assert elliptic_e(z, m).rewrite(hyper) == elliptic_e(z, m)
assert tn(elliptic_e(z), (pi/2)*hyper((Rational(-1, 2), Rational(1, 2)), (1,), z))
assert elliptic_e(z).rewrite(meijerg) == \
-meijerg(((Rational(1, 2), Rational(3, 2)), []), ((0,), (0,)), -z)/4
assert elliptic_e(z, m).rewrite(meijerg) == elliptic_e(z, m)
assert tn(elliptic_e(z), -meijerg(((Rational(1, 2), Rational(3, 2)), []), ((0,), (0,)), -z)/4)
assert elliptic_e(z, m).series(z) == \
z + z**5*(-m**2/40 + m/30) - m*z**3/6 + O(z**6)
assert elliptic_e(z).series(z) == pi/2 - pi*z/8 - 3*pi*z**2/128 - \
5*pi*z**3/512 - 175*pi*z**4/32768 - 441*pi*z**5/131072 + O(z**6)
def test_elliptic_pi():
assert elliptic_pi(0, z, m) == elliptic_f(z, m)
assert elliptic_pi(1, z, m) == elliptic_f(z, m) + \
(sqrt(1 - m*sin(z)**2)*tan(z) - elliptic_e(z, m))/(1 - m)
assert elliptic_pi(n, i*pi/2, m) == i*elliptic_pi(n, m)
assert elliptic_pi(n, z, 0) == atanh(sqrt(n - 1)*tan(z))/sqrt(n - 1)
assert elliptic_pi(n, z, n) == elliptic_f(z, n) - elliptic_pi(1, z, n) + tan(z)/sqrt(1 - n*sin(z)**2)
assert elliptic_pi(oo, z, m) == 0
assert elliptic_pi(-oo, z, m) == 0
assert elliptic_pi(n, z, oo) == 0
assert elliptic_pi(n, z, -oo) == 0
assert elliptic_pi(0, m) == elliptic_k(m)
assert elliptic_pi(1, m) == zoo
assert elliptic_pi(n, 0) == pi/(2*sqrt(1 - n))
assert elliptic_pi(2, 1) == -oo
assert elliptic_pi(-1, 1) == oo
assert elliptic_pi(n, n) == elliptic_e(n)/(1 - n)
assert elliptic_pi(oo, m) == 0
assert elliptic_pi(n, oo) == 0
assert elliptic_pi(n, -z, m) == -elliptic_pi(n, z, m)
ni, mi = Symbol('n', extended_real=False), Symbol('m', extended_real=False)
assert elliptic_pi(ni, z, mi).conjugate() == \
elliptic_pi(ni.conjugate(), z.conjugate(), mi.conjugate())
nr, mr = Symbol('n', extended_real=True, negative=True), \
Symbol('m', extended_real=True, negative=True)
assert elliptic_pi(nr, z, mr).conjugate() == elliptic_pi(nr, z.conjugate(), mr)
assert elliptic_pi(n, m).conjugate() == elliptic_pi(n.conjugate(), m.conjugate())
assert elliptic_pi(n, z, m).conjugate() == conjugate(elliptic_pi(n, z, m))
assert elliptic_pi(n, z, m).diff(n) == (elliptic_e(z, m) + (m - n)*elliptic_f(z, m)/n +
(n**2 - m)*elliptic_pi(n, z, m)/n - n*sqrt(1 -
m*sin(z)**2)*sin(2*z)/(2*(1 - n*sin(z)**2)))/(2*(m - n)*(n - 1))
assert elliptic_pi(n, z, m).diff(z) == 1/(sqrt(1 - m*sin(z)**2)*(1 - n*sin(z)**2))
assert elliptic_pi(n, z, m).diff(m) == (elliptic_e(z, m)/(m - 1) + elliptic_pi(n, z, m) -
m*sin(2*z)/(2*(m - 1)*sqrt(1 - m*sin(z)**2)))/(2*(n - m))
assert elliptic_pi(n, m).diff(n) == (elliptic_e(m) + (m - n)*elliptic_k(m)/n +
(n**2 - m)*elliptic_pi(n, m)/n)/(2*(m - n)*(n - 1))
assert elliptic_pi(n, m).diff(m) == (elliptic_e(m)/(m - 1) + elliptic_pi(n, m))/(2*(n - m))
# workaround fredrik-johansson/mpmath#571, suggested by <NAME>
# in https://github.com/sympy/sympy/issues/20933#issuecomment-779077562
bounds = {'a': -0.9, 'b': -0.9, 'c': 0.9, 'd': 0.9}
rx, ry = randcplx(**bounds), randcplx(**bounds)
assert td(elliptic_pi(n, rx, ry), n, **bounds)
assert td(elliptic_pi(rx, z, ry), z, **bounds)
assert td(elliptic_pi(rx, ry, m), m, **bounds)
pytest.raises(ArgumentIndexError, lambda: elliptic_pi(n, z, m).fdiff(4))
pytest.raises(ArgumentIndexError, lambda: elliptic_pi(n, m).fdiff(3))
assert elliptic_pi(n, z, m).series(z) == z + z**3*(m/6 + n/3) + \
z**5*(3*m**2/40 + m*n/10 - m/30 + n**2/5 - n/15) + O(z**6)
|
1627893
|
from django.http import HttpResponse
from django.views.generic import View
from django_renderpdf.views import PDFView
class PromptDownloadView(PDFView):
template_name = "test_template.html"
prompt_download = True
download_name = "myfile.pdf"
class NoPromptDownloadView(PDFView):
template_name = "test_template.html"
prompt_download = False
class AllowForceHtmlView(PDFView):
template_name = "test_template.html"
allow_force_html = True
class DisallowForceHtmlView(PDFView):
template_name = "test_template.html"
allow_force_html = False
class TemplateWithStaticFileView(PDFView):
template_name = "test_template_with_staticfile.html"
class CssView(View):
"""Test view that returns some CSS."""
def get(self, request):
return HttpResponse("* { background-color: red; }")
class NoTemplateDefinedView(PDFView):
"""A view that's missing a template_name."""
class PromptWithMissingDownloadNameView(PDFView):
template_name = "test_template.html"
prompt_download = True
|
1627917
|
from .revoked_token import RevokedToken
from .copy_job import CopyJob
from .hashsum_job import HashsumJob
from .cloud_connection import CloudConnection
|
1627924
|
from typing import List, Optional
from pydantic.dataclasses import dataclass
from rastervision.pipeline.config import (Config, register_config, Field,
validator, ConfigError)
from rastervision.core.data.raster_transformer import RasterTransformerConfig
from rastervision.core.utils.misc import Proportion
@dataclass
class CropOffsets:
"""Tuple of relative offsets.
Args:
skip_top (Proportion): Proportion of height to exclude from the top.
skip_left (Proportion): Proportion of width to exclude from the left.
skip_bottom (Proportion): Proportion of height to exclude from the
bottom.
skip_right (Proportion): Proportion of width to exclude from the right.
"""
skip_top: Proportion = 0.
skip_left: Proportion = 0.
skip_bottom: Proportion = 0.
skip_right: Proportion = 0.
def __iter__(self):
return iter((self.skip_top, self.skip_left, self.skip_bottom,
self.skip_right))
@register_config('raster_source')
class RasterSourceConfig(Config):
channel_order: Optional[List[int]] = Field(
None,
description=
'The sequence of channel indices to use when reading imagery.')
transformers: List[RasterTransformerConfig] = []
    extent_crop: Optional[CropOffsets] = Field(
None,
description='Relative offsets '
'(skip_top, skip_left, skip_bottom, skip_right) for cropping '
'the extent of the raster source. Useful for splitting a scene into '
'different dataset splits. E.g. if you want to use the top 80% of the '
'image for training and the bottom 20% for validation you can pass '
'extent_crop=CropOffsets(skip_bottom=0.20) to the raster source in '
'the training scene and extent_crop=CropOffsets(skip_top=0.80) to the '
'raster source in the validation scene. Defaults to None i.e. no '
'cropping.')
def build(self, tmp_dir, use_transformers=True):
raise NotImplementedError()
def update(self, pipeline=None, scene=None):
for t in self.transformers:
t.update(pipeline, scene)
@validator('extent_crop')
def validate_extent_crop(cls, v):
if v is None:
return v
skip_top, skip_left, skip_bottom, skip_right = v
if skip_top + skip_bottom >= 1:
raise ConfigError(
'Invalid crop. skip_top + skip_bottom must be less than 1.')
if skip_left + skip_right >= 1:
raise ConfigError(
'Invalid crop. skip_left + skip_right must be less than 1.')
return v
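# Illustrative train/validation split (a sketch mirroring the extent_crop description above):
# the top 80% of a scene for training and the bottom 20% for validation could be expressed as
#   train_crop = CropOffsets(skip_bottom=0.2)
#   val_crop = CropOffsets(skip_top=0.8)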
|
1627947
|
import os
import torch
import torchvision
import torch.nn as nn
from torchvision import transforms
from torchvision.utils import save_image
# Device Configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
latent_size = 64
hidden_size = 256
image_size = 784
num_epochs = 200
batch_size = 100
sample_dir = 'samples'
# Create a directory if not exists
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
# Image Processing
# MNIST images have a single channel, so Normalize takes one mean and one std value
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(
    mean=(0.5,), std=(0.5,))])
# MNIST Dataset
mnist = torchvision.datasets.MNIST(
root='../../../data/', train=True, transform=transform, download=True)
# Data Loader
data_loader = torch.utils.data.DataLoader(dataset=mnist,
batch_size=batch_size,
shuffle=True)
# Discriminator
Discriminator = nn.Sequential(
nn.Linear(image_size, hidden_size),
nn.LeakyReLU(0.2),
nn.Linear(hidden_size, hidden_size),
nn.LeakyReLU(0.2),
nn.Linear(hidden_size, 1),
nn.Sigmoid(),
)
Generator = nn.Sequential(
nn.Linear(latent_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, image_size),
nn.Tanh()
)
# Device setting
Discriminator = Discriminator.to(device)
Generator = Generator.to(device)
# Binary Cross Entropy and Optimizer
criterion = nn.BCELoss()
d_optimizer = torch.optim.Adam(Discriminator.parameters(), lr=0.0002)
g_optimizer = torch.optim.Adam(Generator.parameters(), lr=0.0002)
def denorm(x):
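    # Map generator outputs from the tanh range [-1, 1] back to [0, 1] for image saving.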
out = (x + 1) / 2
return out.clamp(0, 1)
def reset_grad():
d_optimizer.zero_grad()
g_optimizer.zero_grad()
# Start Training
total_step = len(data_loader)
for epoch in range(num_epochs):
for i, (images, _) in enumerate(data_loader):
images = images.reshape(batch_size, -1).to(device)
# Create label for BCE Loss
real_labels = torch.ones(batch_size, 1).to(device)
fake_labels = torch.zeros(batch_size, 1).to(device)
# Train the Discriminator
outputs = Discriminator(images)
d_loss_real = criterion(outputs, real_labels)
real_score = outputs
# Compute BCELoss using fake images
Z = torch.randn(batch_size, latent_size).to(device)
fake_images = Generator(Z)
outputs = Discriminator(fake_images)
d_loss_fake = criterion(outputs, fake_labels)
fake_score = outputs
# Backprop and optimize
d_loss = d_loss_real + d_loss_fake
reset_grad()
d_loss.backward()
d_optimizer.step()
# Train the Generator
# Compute loss with fake images
Z = torch.randn(batch_size, latent_size).to(device)
fake_images = Generator(Z)
outputs = Discriminator(fake_images)
g_loss = criterion(outputs, real_labels)
# Backprop and optimize
reset_grad()
g_loss.backward()
g_optimizer.step()
if (i + 1) % 200 == 0:
print('Epoch [{}/{}], Step [{}/{}], d_loss: {:.4f}, g_loss: {:.4f}, D(x): {:.2f}, D(G(z)): {:.2f}'.format(epoch,
num_epochs, i + 1, total_step, d_loss.item(), g_loss.item(), real_score.mean().item(), fake_score.mean().item()))
if (epoch+1) == 1:
images = images.reshape(images.size(0), 1, 28, 28)
save_image(denorm(images), os.path.join(sample_dir, 'real-images.png'))
# Save Sample Images
fake_images = fake_images.reshape(images.size(0), 1, 28, 28)
save_image(denorm(fake_images), os.path.join(sample_dir, 'fake_images-{}.png'.format(epoch+1)))
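# Note on the loss targets (illustrative summary of the loop above): the discriminator is
# trained to output 1 for real images and 0 for generator samples, while the generator is
# trained against the "real" labels, so minimizing its BCE loss maximizes log D(G(z)) --
# the standard non-saturating GAN heuristic.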
|
1627951
|
import copy
import shutil
import tensorflow as tf
import tensorflow_hub as hub
from detext.layers import embedding_layer
from detext.utils.layer_utils import get_sorted_dict
from detext.utils.parsing_utils import InternalFtrType
from detext.utils.testing.data_setup import DataSetup
class TestEmbeddingLayer(tf.test.TestCase, DataSetup):
"""Tests embedding_layer.py"""
num_cls_sep = 0
min_len = 0
max_len = 5
sentences = tf.constant(['hello sent1', 'build sent2'])
inputs = get_sorted_dict({InternalFtrType.SENTENCES: sentences,
InternalFtrType.NUM_CLS: tf.constant(num_cls_sep, dtype=tf.dtypes.int32),
InternalFtrType.NUM_SEP: tf.constant(num_cls_sep, dtype=tf.dtypes.int32),
InternalFtrType.MIN_LEN: tf.constant(min_len, dtype=tf.dtypes.int32),
InternalFtrType.MAX_LEN: tf.constant(max_len, dtype=tf.dtypes.int32)})
embedding_layer_param = {'vocab_layer_param': DataSetup.vocab_layer_param,
'vocab_hub_url': '',
'we_file': '',
'we_trainable': True,
'num_units': DataSetup.num_units}
def testEmbeddingLayerApi(self):
"""Checks whether a given layer conforms to the detext embedding layer api"""
layer = hub.load(self.embedding_hub_url)
layer: embedding_layer.EmbeddingLayerBase
self.assertEqual(layer.num_units(), self.num_units)
self.assertEqual(layer.vocab_size(), self.vocab_size)
tokenized = layer.tokenize_to_indices(self.inputs)
expected_tokenized = {InternalFtrType.LENGTH: tf.constant([2, 2]),
InternalFtrType.TOKENIZED_IDS: tf.constant([[0, 0],
[4, 0]])}
for k, v in tokenized.items():
self.assertAllEqual(v, expected_tokenized[k])
tokenized_result = tf.constant([[1, 2], [0, 1]])
tokenized_result_shape = tf.shape(tokenized_result)
embedding_lookup_result = layer.embedding_lookup(tokenized_result)
self.assertAllEqual(tf.shape(embedding_lookup_result), [tokenized_result_shape[0], tokenized_result_shape[1], layer.num_units()])
outputs = layer(self.inputs)
self.assertAllEqual(tf.shape(outputs[InternalFtrType.EMBEDDED]), [tokenized_result_shape[0], tokenized_result_shape[1], layer.num_units()])
self.assertAllEqual(outputs[InternalFtrType.LENGTH], tf.constant([2, 2]))
def testCreateEmbeddingLayer(self):
for vocab_hub_url in ['', self.vocab_hub_url]:
embedding_layer_param = copy.copy(self.embedding_layer_param)
embedding_layer_param['vocab_hub_url'] = vocab_hub_url
self._testCreateEmbeddingLayer('', embedding_layer_param)
embedding_layer_param = copy.copy(self.embedding_layer_param)
embedding_layer_param['we_file'] = self.we_file
self._testCreateEmbeddingLayer('', embedding_layer_param)
embedding_layer_param = copy.copy(self.embedding_layer_param)
self._testCreateEmbeddingLayer(self.embedding_hub_url, embedding_layer_param)
def _testCreateEmbeddingLayer(self, embedding_hub_url, embedding_layer_param):
layer = embedding_layer.create_embedding_layer(embedding_layer_param, embedding_hub_url)
outputs = layer(self.inputs)
tf.saved_model.save(layer, self.embedding_layer_dir)
loaded_layer = embedding_layer.create_embedding_layer(embedding_layer_param, embedding_hub_url=self.embedding_layer_dir)
loaded_layer_outputs = loaded_layer(self.inputs)
for k, v in outputs.items():
self.assertAllEqual(v, loaded_layer_outputs[k])
shutil.rmtree(self.embedding_layer_dir)
if __name__ == '__main__':
tf.test.main()
|
1627974
|
from types import *
def check_type(obj,atts=[],callables=[]):
got_atts=True
for att in atts:
if not hasattr(obj,att):
got_atts=False;break
got_callables=True
for call in callables:
if not hasattr(obj,call):
got_callables=False;break
the_attr=getattr(obj,call)
if not callable(the_attr):
got_callables=False;break
if got_atts and got_callables: return -1
return 0
def is_iter(obj):
if isinstance(obj,ListType): return 1
if isinstance(obj,TupleType): return 1
if isinstance(obj,DictType): return 1
if isinstance(obj,FileType): return 1
try:
iter(obj)
return -1
except TypeError:
return 0
def is_gen(obj):
if isinstance(obj,GeneratorType): return 1
return 0
def is_seq(obj):
if isinstance(obj,ListType): return 1
if isinstance(obj,TupleType): return 1
if is_iter(obj):
try:
obj[0:0]
return -1
except TypeError:
pass
return 0
def is_mapping(obj):
if isinstance(obj,DictType): return 1
if is_iter(obj):
return check_type(obj,callables=['iteritems','has_key'])
return 0
def is_list(obj):
if isinstance(obj,ListType): return 1
if is_seq(obj):
if check_type(obj,callables=['append','extend','pop']): return -1
return 0
def is_str(obj):
if isinstance(obj, basestring): return 1
if is_iter(obj):
if check_type(obj,callables=['index','count','replace']): return -1
return 0
def is_file(obj):
if isinstance(obj,FileType): return 1
if check_type(obj,callables=['read','close']): return -1
return 0
def check_all(obj):
result=[ str(i) for i in (is_iter(obj),is_gen(obj),is_seq(obj),is_list(obj),is_str(obj),is_mapping(obj),is_file(obj))]
return '\t'.join(result)
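# Return-value convention used above: 1 means the object is a literal instance of the
# built-in type, -1 means it merely behaves like one (duck typing), and 0 means it does
# not qualify at all.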
#####################examples
print '\t'+'\t'.join(['iter','gen','seq','list','str','dict','file'])
print 'str\t',check_all('str')
print '(1,)\t',check_all((1,))
print '[]\t',check_all([])
print '{}\t',check_all({})
f=open('tmp.txt','w')
print 'file\t',check_all(f)
import cStringIO
cstr=cStringIO.StringIO()
print 'cstrio\t',check_all(cstr)
gen=(i for i in (1,2,3))
print 'gen\t',check_all(gen)
def test(): yield 1
print 'test()\t',check_all(test())
class fdict:
def iteritems(self): pass
def has_key(self): pass
print 'fdict\t',check_all(fdict)
|
1627979
|
from django import template
register = template.Library()
def calc_bar(value, *args):
"""Calculate percentage of value out of the maximum
of several values, for making a bar chart."""
top = max(args + (value,))
percent = value / top * 100
return percent
def calc_mid_bar(value1, value2, *args):
"""Calculate percentage of value out of the maximum
of several values, for making a bar chart. Return
the midpoint between the height of the first and second
parameter."""
top = max(args + (value1, value2))
percent = (value1 + value2) / 2 / top * 100
return percent
register.simple_tag(calc_bar)
register.simple_tag(calc_mid_bar)
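# Illustrative template usage (a sketch; assumes this module is exposed as a tag library
# named "chart_tags", which is hypothetical):
#   {% load chart_tags %}
#   <div class="bar" style="height: {% calc_bar value max_a max_b %}%"></div>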
|
1628006
|
from .. util import deprecated
if deprecated.allowed():
from . channel_order import ChannelOrder
|
1628015
|
import os
import argparse
import sys
import subprocess
import multiprocessing
import pyexcel
import itertools
import nvgpu
DEFAULT_M3D_PATH = r'PATH_TO_MATTERPORT3D'
DEFAULT_DRONE_TRAJECTORIES = r'PATH_TO_TRAJECTORY_FOLDERS'
DEFAULT_BLENDER_PATH = r'PATH_TO_BLENDER_EXE'
DEFAULT_OUTPUT_PATH = r'PATH_TO_DUMP_RESULTS'
DEFAULT_RENDER_SCRIPT_PATH = ".\\render\\DroneRender.py"
DEFAULT_RENDERED_TRAJECTORIES_PATH = ".\\drone.xlsx"
def parse_arguments(args):
usage_text = (
"This script renders egocentric viewpoint pairs from Matterport3D using sampled play-generated trajectories."
"Usage: egocentric_render.py --m3d PATH_TO_MATTERPORT3D --generated trajectories PATH_TO_TRAJECTORY_FOLDERS"
" --blender PATH_TO_BLENDER_EXE --rendered_path PATH_TO_DUMP_RESULTS --render_script PATH_TO_RENDER_SCRIPT"
" --rendered_trajectories PATH_TO_DUMP_COMPLETED_JOBS"
)
parser = argparse.ArgumentParser(description=usage_text)
parser.add_argument('--m3d', type=str, help='Matterport3D root path.',\
default=DEFAULT_M3D_PATH)
parser.add_argument('--generated_trajectories', type=str, help='Root path for the generated trajectories.',\
default=DEFAULT_DRONE_TRAJECTORIES) #TODO: add code
parser.add_argument("--blender", type=str, help="Blender executable path.",\
default=DEFAULT_BLENDER_PATH)
parser.add_argument("--rendered_path", type=str, help="Output folder.",\
default=DEFAULT_OUTPUT_PATH)
parser.add_argument('--render_script', type=str, help='The render script path.',\
default=DEFAULT_RENDER_SCRIPT_PATH)
parser.add_argument('--rendered_trajectories', type=str,
help='The .xlsx file with the already rendered Matterport3D buildings that will be appended to during rendering.',\
default=DEFAULT_RENDERED_TRAJECTORIES_PATH)
return parser.parse_known_args(args)
COMMAND = (
"{0} --background --python {1} -- " # use blender's exe and render script's path
"--samples {2} --scene_model {3} --output_path {4} " #
"--camera_path {5} --device_type {6} --dataset matterport3d " # use \pose path and input flag
"--log_sheet {7} " # use primary render group flag
)
def render_trajectory(t):
command, queue = t
gpu_id = queue.get()
try:
command += "--device_id " + str(gpu_id)
print(command)
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=None, shell=True)
output = process.communicate()
print(output[0])
finally:
queue.put(gpu_id)
def process(t):
command, queue = t
gpu_id = queue.get()
try:
command += "--device_id " + str(gpu_id)
finally:
queue.put(gpu_id)
if __name__ == "__main__":
if 'PROGRAMFILES' in os.environ.keys():
nvidia_smi_path = os.path.join(
os.environ['PROGRAMFILES'],
'NVIDIA Corporation',
'NVSMI'
)
if nvidia_smi_path not in os.environ['PATH']:
os.environ['PATH'] = os.environ['PATH'] + ";" + nvidia_smi_path
gpus = len(nvgpu.available_gpus(max_used_percent=30.0))
args, unknown = parse_arguments(sys.argv)
buildings_root = os.path.join(args.m3d, "v1", "scans")
print("Working on M3D buildings @ %s" % buildings_root)
rendered_trajectories = []
if args.rendered_trajectories is not None and os.path.exists(args.rendered_trajectories):
data = pyexcel.get_sheet(file_name=args.rendered_trajectories)
rendered_trajectories.extend(data.column_at(0))
commands = []
building_hashes = {}
for building_folder in os.listdir(buildings_root):
building_hash = os.path.join(buildings_root, building_folder, building_folder, "matterport_mesh")
for root, dirs, files in os.walk(building_hash, topdown = False):
obj = next((s for s in files if 'obj' in s), None)
mesh = os.path.join(root, obj)
building_hashes.update({building_folder: mesh})
if obj is not None:
break
for traj in os.listdir(args.generated_trajectories):
mesh = building_hashes[traj]
trajectory_folder = os.path.join(args.generated_trajectories, traj)
for trajectory_date in os.listdir(trajectory_folder):
if trajectory_date in rendered_trajectories:
print("Skipping already rendered building (%s)" % building_hash)
continue
for root, dirs, files in os.walk(os.path.join(trajectory_folder, trajectory_date), topdown = False):
txt = next((s for s in files if 'airsim_rec_blender' in s), None)
trajectory_file = os.path.join(root, txt)
if txt is None:
break
cmd = COMMAND.format(
args.blender, #1
args.render_script, #2
256, #3
mesh, #4
args.rendered_path, #5
trajectory_file, #6
'GPU', #7
args.rendered_trajectories, #8,
) + " -d --normal_map -f -r "
commands.extend([cmd])
pool = multiprocessing.Pool(processes=gpus)
m = multiprocessing.Manager()
q = m.Queue()
for gpu_ids in range(gpus):
q.put(gpu_ids)
pool.map(render_trajectory, zip(commands, itertools.repeat(q, len(commands))))
pool.close()
pool.join()
|
1628033
|
from __future__ import print_function
import argparse
import jinja2
import os
import io
import sys
import logging
import markdown2
from .utils import initialize_logger, readable_dir
from .templates import template_path
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
def get_args():
example_text = '''
examples:
python openassessit/to_html -i /tmp/assessment.md -o /tmp/assessment.html
python openassessit/to_html -i /tmp/assessment.md -o /tmp/assessment.html -t /your/templates
'''
parser = argparse.ArgumentParser(epilog=example_text, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-i', '--input-file', help='Provide the path to an input file', default=sys.stdin)
    parser.add_argument('-o', '--output-file', help='Provide the path where the html output will be saved')
parser.add_argument('-t', '--user-template-path',
action=readable_dir,
help='Provide filepath to custom user templates')
return parser.parse_args()
def read_input(input_file):
""" Read OpenAssessIt .md file """
if input_file:
if type(input_file) is str:
with io.open(input_file, encoding='utf-8') as stream:
return stream.read()
def write_output(output_file, rendered, force_stdout=False):
""" Write HTML output file """
if output_file:
with io.open(output_file, 'w', encoding='utf-8') as stream:
stream.write(rendered)
if force_stdout:
print(rendered)
def main():
""" Convert to Markdown to HTML """
args = get_args()
input_file = args.input_file
output_file = args.output_file
paths = list()
if args.user_template_path:
user_template_path = args.user_template_path
paths.append(user_template_path)
else:
paths.append(template_path)
loader = jinja2.FileSystemLoader(paths)
env = jinja2.Environment(loader=loader)
header = loader.load(env, 'to_html_header.html')
footer = loader.load(env, 'to_html_footer.html')
md = read_input(input_file)
html = markdown2.markdown(md, extras={"fenced-code-blocks": None, "toc": {"depth": 2}})
toc_html = html.toc_html
TOC_MARKER = '<!--TOC-->'
html = html.replace(TOC_MARKER, toc_html)
output = "".join([header.render(), html, footer.render()])
output_dir = os.path.dirname(input_file)
initialize_logger('html', output_dir)
write_output(output_file, output)
logging.info('HTML conversion complete in: ' + output_file)
if __name__ == '__main__':
main()
|
1628052
|
from typing import List
from .comment_tokenizer import scan_for_comment
from .iterator import LineWrapIterator
from .number_tokenizer import scan_for_number
from .preprocessor_tokenizer import scan_for_preprocessor
from .quote_tokenizer import scan_for_quote
from .remaining_tokenizer import scan_for_remaining
from .token import Token, StartToken, EndToken
from .whitespace_tokenizer import scan_for_whitespace
def tokenize(src) -> List[Token]:
it = LineWrapIterator(src)
tokens = [StartToken(it, it)]
while it.curr != '\0':
succeeded = scan_for_whitespace(tokens, it)
if not succeeded:
succeeded = scan_for_comment(tokens, it)
if not succeeded:
succeeded = scan_for_quote(tokens, it)
if not succeeded:
succeeded = scan_for_number(tokens, it)
if not succeeded:
succeeded = scan_for_preprocessor(tokens, it)
if not succeeded:
scan_for_remaining(tokens, it)
tokens.append(EndToken(it, it))
return tokens
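# Illustrative usage (a sketch; assumes `src` is a plain string accepted by LineWrapIterator):
#   tokens = tokenize('int x = 42; // answer')
# returns a StartToken, then the whitespace/comment/quote/number/preprocessor/remaining
# tokens in source order, and a terminating EndToken.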
|
1628071
|
from __future__ import absolute_import
from __future__ import unicode_literals
from unittest import TestCase
from webfriend.scripting.execute import execute_script
class FormatProxyTest(TestCase):
def setUp(self):
self.maxDiff = 10000
def _eval(self, script):
return execute_script(None, script).as_dict()
def test_autotype(self):
self.assertEqual({
'a': 42,
'b': 3.1415,
'c': True,
'd': True,
'e': True,
'f': False,
'g': False,
'h': False,
'i': -42,
'j': -3.1415,
'k': 0,
'l': 1,
}, self._eval("""
fmt::autotype "42" -> $a
fmt::autotype "3.1415" -> $b
fmt::autotype "true" -> $c
fmt::autotype "True" -> $d
fmt::autotype "TRUE" -> $e
fmt::autotype "false" -> $f
fmt::autotype "False" -> $g
fmt::autotype "FALSE" -> $h
fmt::autotype "FALSE" -> $h
fmt::autotype "-42" -> $i
fmt::autotype "-3.1415" -> $j
fmt::autotype "0" -> $k
fmt::autotype "1" -> $l
"""))
def test_strip(self):
self.assertEqual({
'a': 'Value',
'b': 'Value',
'c': 'Value',
'd': 'Value',
'e': 'Value',
'f': 'Value',
'g': ' Value',
'h': 'Value ',
'i': 'Value!',
'j': 'Value ',
'k': ' Value',
}, self._eval("""
fmt::strip 'Value' -> $a
fmt::strip ' Value' -> $b
fmt::strip 'Value ' -> $c
fmt::strip ' \n\nValue ' -> $d
fmt::strip begin
Value
end -> $e
fmt::strip 'Value' {
characters: '$',
} -> $f
fmt::strip '$ Value' {
characters: '$',
} -> $g
fmt::strip 'Value $$$' {
characters: '$',
} -> $h
fmt::strip '$$#@$Value!$&#' {
characters: '@#&$',
} -> $i
fmt::lstrip ' Value ' -> $j
fmt::rstrip ' Value ' -> $k
"""))
|
1628110
|
import json
import discord
from discord.ext import commands
from utils.converters import LanguageConverter
from utils.errors import NoAPIKey
from utils.functions import load_json
from utils.paginator import Paginator
class English(commands.Cog):
"""Commands for the english language"""
def __init__(self, bot):
self.bot = bot
# We set the variable to the bot since it is gonna be used by our converter
bot.language_codes = load_json("assets/data/language_codes.json")
@commands.command(aliases=["def", "df"])
@commands.bot_has_permissions(use_external_emojis=True)
async def define(self, ctx, word: str):
"""Sends the definition of a word"""
# getting the api key
try:
token = self.bot.api_keys["owlbot"]
except KeyError:
raise NoAPIKey
# Doing the request
heds = {"Authorization": f"Token {token}"}
async with self.bot.session.get(f"https://owlbot.info/api/v4/dictionary/{word}?format=json", headers=heds) as r:
text = await r.text()
fj = json.loads(text)
# Validation
definitions = fj["definitions"]
        if len(definitions) == 0:
            await ctx.send("Word not found")
            return
# Formatting the result
embeds = []
for definition in definitions:
emb = discord.Embed(title=fj["word"], description=definition["definition"])
emb.add_field(
name="Example",
value=definition["example"].replace("<b>", "**").replace("</b>", "**"),
inline=False,
)
emb.add_field(name="Pronunciation", value=fj["pronunciation"], inline=False)
if definition["image_url"]:
emb.set_image(url=definition["image_url"])
embeds.append(emb)
# Sending the results
pag = Paginator(embeds, "Definition")
await pag.start(ctx)
@commands.command(aliases=["tr"])
# TODO: use flags
async def translate(self, ctx, lang: LanguageConverter = None, *, text: str):
"""Translates a text to another language if specified, defaults to English"""
lang = lang or "en"
result = await ctx.bot.translate_api.translate(text, dest=lang)
# We get the full language names
languageconverter = LanguageConverter()
source = languageconverter.convert(ctx, result.src)
destination = languageconverter.convert(ctx, result.dest)
embed = discord.Embed(title=f"Translation", description=result.text, color=0x2F3136)
if result.text != result.pronunciation:
embed.add_field(name="Pronunciation", value=result.pronunciation)
embed.set_footer(text=f"Translated from {source.split(';')[0]} to {destination.split(';')[0]}")
await ctx.send(embed=embed)
def setup(bot):
"""Adds the cog to the bot"""
bot.add_cog(English(bot))
|
1628116
|
from typing import List
class Solution:
    def minCostClimbingStairs(self, cost: List[int]) -> int:
# dp[i] = cost[i] + min(dp[i-1], dp[i-2])
# result = min(dp[-1], dp[-2])
dp = [0 for _ in range(len(cost))]
dp[0] = cost[0]
dp[1] = cost[1]
for i in range(2, len(cost)):
dp[i] = cost[i] + min(dp[i-1], dp[i-2])
return min(dp[-1], dp[-2])
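    # Worked example (illustrative): for cost = [10, 15, 20] the table is
    # dp = [10, 15, 30], so the answer is min(30, 15) = 15 -- pay 15 at the
    # second step and climb two steps to the top.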
|
1628117
|
import os.path
import os
from dotenv import load_dotenv
import sys
import platform
import urllib
import tarfile
import requests
from zipfile import ZipFile
from config import DOWNLOAD_PATH, VERSION_FILE
from .list import list_remote
""" Download Required kubectl / kustomize / helm / helmfile Versions """
def download_program(args, program, version, fast):
operating_sys = sys.platform
    # Create the download path if it does not exist yet
    if not os.path.exists(DOWNLOAD_PATH):
        os.mkdir(DOWNLOAD_PATH)
if not fast:
available_versions = list_remote(args)
if version not in available_versions:
print("Version '" + version + "' is not right available " + program + " version.\
\nYou can check right available versions by running 'kubenvz kubectl/kustomize list remote'.\n")
sys.exit(1)
else:
print("Skipping remote check...")
if program == "kubectl":
url = "https://storage.googleapis.com/kubernetes-release/release/v" + version + "/bin/" + operating_sys + "/amd64/kubectl"
alternative_url = url
alternative_url_binary = url
elif program == "helm":
# https://get.helm.sh/helm-v3.1.0-darwin-amd64.tar.gz
url = "https://get.helm.sh/helm-v" + version.lstrip("v") + "-" + operating_sys + "-amd64.tar.gz"
alternative_url = url
alternative_url_binary = url
elif program == "helmfile":
# https://github.com/roboll/helmfile/releases/download/v0.100.1/helmfile_darwin_amd64
url = "https://github.com/roboll/helmfile/releases/download/v" + version.lstrip("v") + "/helmfile_" + operating_sys + "_amd64"
alternative_url = url
alternative_url_binary = url
elif program == "kustomize" and "kustomize" not in version:
url = "https://github.com/kubernetes-sigs/kustomize/releases/download/" + \
version + "/kustomize_" + version.lstrip("v") + "_" + operating_sys + "_amd64"
alternative_url = "https://github.com/kubernetes-sigs/kustomize/releases/download/" + \
version + "/kustomize_" + version.lstrip("v") + "_" + operating_sys + "_amd64.tar.gz"
alternative_url_binary = url
elif program == "kustomize" and "kustomize" in version:
url = "https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F" + \
version.lstrip("kustomize/") + "/kustomize_" + version.lstrip(
"kustomize/") + "_" + operating_sys + "_amd64.tar.gz"
alternative_url = "https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F" + \
version.lstrip("kustomize/") + "/kustomize_kustomize." + version.lstrip(
"kustomize/") + "_" + operating_sys + "_amd64.tar.gz"
alternative_url_binary = "https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F" + \
version.lstrip("kustomize/") + "/kustomize_kustomize." + version.lstrip(
"kustomize/") + "_" + operating_sys + "_amd64"
if not os.path.exists(DOWNLOAD_PATH + program + "_" + version.lstrip("kustomize/").lstrip("v")):
print("Downloading", program, version, "from", url)
binary = requests.get(url)
if binary.status_code == 404:
print("Retrying ", program, version, "from", alternative_url)
url = alternative_url
binary = requests.get(alternative_url)
if binary.status_code == 404:
print("Retrying ", program, version, "from", alternative_url_binary)
url = alternative_url_binary
binary = requests.get(alternative_url_binary)
if binary.status_code == 404:
print("Error downloading", program, version, "from", url)
raise Exception("Invalid version, got 404 error !")
dest_path = DOWNLOAD_PATH + program + "_" + version.lstrip("kustomize/").lstrip("v")
print("Downloading to", dest_path)
open(dest_path, 'wb').write(binary.content)
if "tar.gz" in url:
tar = tarfile.open(dest_path, "r:gz")
tar.extractall(path=DOWNLOAD_PATH + '/')
tar.close()
if os.path.exists(DOWNLOAD_PATH + '/' + program) and os.path.exists(dest_path):
os.remove(dest_path)
os.rename(DOWNLOAD_PATH + '/' + program, dest_path)
elif os.path.exists(DOWNLOAD_PATH + "/" + operating_sys + "-amd64/" + program) and os.path.exists(dest_path):
os.remove(dest_path)
os.rename(DOWNLOAD_PATH + "/" + operating_sys + "-amd64/" + program, dest_path)
else:
raise Exception("Issue extracting !!")
os.chmod(dest_path, 0o755)
else:
print(program, version, "already downloaded")
""" Installs Required kubectl / kustomize / helm / helmfile Versions """
def install(args):
program = args.program
version = args.version
fast = args.f
if not version and os.path.exists(VERSION_FILE):
load_dotenv(dotenv_path=VERSION_FILE)
version = (os.getenv(program.upper()))
if not version:
print("Please define version or add that to .kubenvz file.\
\nYou don't need to mention version if you have .kubenvz file at current path. \n")
sys.exit(1)
dest_path = DOWNLOAD_PATH + program + "_" + version.lstrip("kustomize/").lstrip("v")
if program == "kubectl":
download_program(args, program, version, fast)
elif program == "kustomize":
download_program(args, program, version, fast)
elif program == "helm":
download_program(args, program, version, fast)
elif program == "helmfile":
download_program(args, program, version, fast)
else:
raise Exception(
            'Invalid argument! It should be one of kubectl / kustomize / helm / helmfile')
if not os.access('/usr/local/bin', os.W_OK):
print("Error: User doesn't have write permission of /usr/local/bin directory.\
\n\nRun below command to grant permission and rerun 'kubenvz install' command.\
\nsudo chown -R $(whoami) /usr/local/bin\n")
sys.exit(1)
try:
os.remove("/usr/local/bin/" + program)
except FileNotFoundError:
pass
os.symlink(dest_path, "/usr/local/bin/" + program)
|
1628120
|
from argparse import ArgumentParser
from multiprocessing import Pool
import os
from NISP.dataset import NISPDataset
from NISP.lightning_model import LightningModel
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.loggers import TensorBoardLogger
from config import NISPConfig
import torch
import torch.utils.data as data
if __name__ == "__main__":
parser = ArgumentParser(add_help=True)
parser.add_argument('--data_path', type=str, default=NISPConfig.data_path)
parser.add_argument('--speaker_csv_path', type=str, default=NISPConfig.speaker_csv_path)
parser.add_argument('--timit_wav_len', type=int, default=NISPConfig.timit_wav_len)
parser.add_argument('--batch_size', type=int, default=NISPConfig.batch_size)
parser.add_argument('--epochs', type=int, default=NISPConfig.epochs)
parser.add_argument('--alpha', type=float, default=NISPConfig.alpha)
parser.add_argument('--beta', type=float, default=NISPConfig.beta)
parser.add_argument('--gamma', type=float, default=NISPConfig.gamma)
parser.add_argument('--hidden_size', type=float, default=NISPConfig.hidden_size)
parser.add_argument('--gpu', type=int, default=NISPConfig.gpu)
parser.add_argument('--n_workers', type=int, default=NISPConfig.n_workers)
parser.add_argument('--dev', type=str, default=False)
parser.add_argument('--model_checkpoint', type=str, default=NISPConfig.model_checkpoint)
parser.add_argument('--noise_dataset_path', type=str, default=NISPConfig.noise_dataset_path)
parser = pl.Trainer.add_argparse_args(parser)
hparams = parser.parse_args()
print(f'Training Model on NISP Dataset\n#Cores = {hparams.n_workers}\t#GPU = {hparams.gpu}')
# hyperparameters and details about the model
HPARAMS = {
'data_path' : hparams.data_path,
'speaker_csv_path' : hparams.speaker_csv_path,
'data_wav_len' : hparams.timit_wav_len,
'data_batch_size' : hparams.batch_size,
'data_wav_augmentation' : 'Random Crop, Additive Noise',
'data_label_scale' : 'Standardization',
'training_optimizer' : 'Adam',
'training_lr' : NISPConfig.lr,
'training_lr_scheduler' : '-',
'model_hidden_size' : hparams.hidden_size,
'model_alpha' : hparams.alpha,
'model_beta' : hparams.beta,
'model_gamma' : hparams.gamma,
'model_architecture' : 'wav2vec + soft-attention',
}
# Training, Validation and Testing Dataset
## Training Dataset
train_set = NISPDataset(
wav_folder = os.path.join(HPARAMS['data_path'], 'TRAIN'),
csv_file = HPARAMS['speaker_csv_path'],
wav_len = HPARAMS['data_wav_len'],
noise_dataset_path = hparams.noise_dataset_path
)
## Training DataLoader
trainloader = data.DataLoader(
train_set,
batch_size=HPARAMS['data_batch_size'],
shuffle=True,
num_workers=hparams.n_workers
)
## Validation Dataset
valid_set = NISPDataset(
wav_folder = os.path.join(HPARAMS['data_path'], 'VAL'),
csv_file = HPARAMS['speaker_csv_path'],
wav_len = HPARAMS['data_wav_len'],
is_train=False
)
## Validation Dataloader
valloader = data.DataLoader(
valid_set,
batch_size=HPARAMS['data_batch_size'],
shuffle=False,
num_workers=hparams.n_workers
)
## Testing Dataset
test_set = NISPDataset(
wav_folder = os.path.join(HPARAMS['data_path'], 'TEST'),
csv_file = HPARAMS['speaker_csv_path'],
wav_len = HPARAMS['data_wav_len'],
is_train=False
)
## Testing Dataloader
testloader = data.DataLoader(
test_set,
batch_size=HPARAMS['data_batch_size'],
shuffle=False,
num_workers=hparams.n_workers
)
print('Dataset Split (Train, Validation, Test)=', len(train_set), len(valid_set), len(test_set))
#Training the Model
logger = TensorBoardLogger('NISP_logs', name='')
logger.log_hyperparams(HPARAMS)
model = LightningModel(HPARAMS)
checkpoint_callback = ModelCheckpoint(
monitor='v_loss',
mode='min',
verbose=1)
trainer = pl.Trainer(fast_dev_run=hparams.dev,
gpus=hparams.gpu,
max_epochs=hparams.epochs,
checkpoint_callback=checkpoint_callback,
callbacks=[
EarlyStopping(
monitor='v_loss',
min_delta=0.00,
patience=10,
verbose=True,
mode='min'
)
],
logger=logger,
                         resume_from_checkpoint=hparams.model_checkpoint,
                         distributed_backend='ddp'
)
trainer.fit(model, train_dataloader=trainloader, val_dataloaders=valloader)
print('\n\nCompleted Training...\nTesting the model with checkpoint -', checkpoint_callback.best_model_path)
model = LightningModel.load_from_checkpoint(checkpoint_callback.best_model_path)
trainer.test(model, test_dataloaders=testloader)
|
1628123
|
from __future__ import unicode_literals
from django.db.models import fields
from django.utils.translation import ugettext_lazy as _
from ...models import FieldDefinition, FieldDefinitionManager
auto_now_help_text = _('Automatically set the field to now every time the '
'object is saved.')
auto_now_add_help_text = _('Automatically set the field to now when the '
'object is first created.')
class DateFieldDefinition(FieldDefinition):
auto_now = fields.BooleanField(_('auto now'), default=False,
help_text=auto_now_help_text)
auto_now_add = fields.BooleanField(_('auto now add'), default=False,
help_text=auto_now_add_help_text)
objects = FieldDefinitionManager()
class Meta:
app_label = 'temporal'
defined_field_class = fields.DateField
defined_field_options = ('auto_now', 'auto_now_add',)
defined_field_category = _('Temporal')
class TimeFieldDefinition(DateFieldDefinition):
class Meta:
app_label = 'temporal'
proxy = True
defined_field_class = fields.TimeField
class DateTimeFieldDefinition(DateFieldDefinition):
class Meta:
app_label = 'temporal'
proxy = True
defined_field_class = fields.DateTimeField
|
1628132
|
import numpy as np
from sklearn.metrics import r2_score
from metaflow_helper.models import KerasRegressor
from metaflow_helper.constants import RunMode
def test_keras_model_regressor_handler_train():
n_examples = 10
n_repeat = 10
offset = 0
X = np.repeat(np.arange(n_examples).astype(float)/n_examples, n_repeat)[:, None]
y = np.repeat(np.arange(n_examples).astype(float)/n_examples + offset, n_repeat)
model_handler = KerasRegressor(
build_model='metaflow_helper.models.build_keras_regression_model',
mode=RunMode.TRAIN,
input_dim=1,
dense_layer_widths=(),
dropout_probabilities=(),
)
model_handler.fit(X, y, epochs=1000, verbose=0)
y_pred = model_handler.predict(X)
np.testing.assert_allclose(y, y_pred, rtol=2)
assert r2_score(y, y_pred) > 0.9
def test_keras_model_regressor_handler_test():
n_examples = 10
n_repeat = 10
offset = 0
X = np.repeat(np.arange(n_examples).astype(float)/n_examples, n_repeat)[:, None]
y = np.repeat(np.arange(n_examples).astype(float)/n_examples + offset, n_repeat)
model_handler = KerasRegressor(
build_model='metaflow_helper.models.build_keras_regression_model',
mode=RunMode.TEST,
input_dim=1,
dense_layer_widths=(),
dropout_probabilities=(),
eval_metric='mse',
)
model_handler.fit(X, y, epochs=1000, verbose=0, validation_split=0.1, patience=2)
y_pred = model_handler.predict(X)
np.testing.assert_allclose(y, y_pred, rtol=2)
assert r2_score(y, y_pred) > 0.9
|
1628138
|
import sympy
maxLimit = 10000
for n in range(maxLimit+1):
print "true" if sympy.isprime(n) else "false"
|
1628150
|
import numpy as np
from scipy.constants import g
from floodlight.utils.types import Numeric
from floodlight.core.xy import XY
from floodlight.core.property import PlayerProperty
from floodlight.models.base import BaseModel, requires_fit
from floodlight.models.kinematics import VelocityModel, AccelerationModel
class MetabolicPowerModel(BaseModel):
"""Class for calculating Metabolic Power and derived metrics from spatiotemporal
data.
    Upon calling the :func:`~MetabolicPowerModel.fit`-method, this model calculates the
frame-wise Metabolic Power for each player. The following calculations can
subsequently be queried by calling the corresponding methods:
- Frame-wise Metabolic Power --> :func:`~MetabolicPowerModel.metabolic_power`
- Cumulative Metabolic Power --> :func:`~MetabolicPowerModel.cumulative_\
metabolic_power`
- Frame-wise Equivalent Distance --> :func:`~MetabolicPowerModel.equivalent_\
distance`
- Cumulative Equivalent Distance --> :func:`~MetabolicPowerModel.cumulative_\
equivalent_distance`
Notes
-----
Metabolic Power is defined as the energy expenditure over time necessary to move at
a certain speed, and is calculated as the product of energy cost of transport per
unit body mass and distance [:math:`\\frac{J}{kg \\cdot m}`] and velocity
[:math:`\\frac{m}{s}`]. Metabolic Power and Energy cost of walking is calculated
according to di Prampero & Osgnach [1]_. Energy cost of running is calculated with
    the updated formula of Minetti & Pavei [2]_.
Examples
--------
>>> import numpy as np
>>> from floodlight import XY
>>> from floodlight.models.kinetics import MetabolicPowerModel
>>> xy = XY(np.array(((0, 0), (0, 1), (1, 1), (2, 2))), framerate=20)
>>> metabolic_power_model = MetabolicPowerModel()
>>> metabolic_power_model.fit(xy)
>>> metabolic_power_model.metabolic_power()
PlayerProperty(property=array([[1164.59773017],
[ 185.59792131],
[9448.10007077],
[8593.05199423]]), name='metabolic_power', framerate=20)
>>> metabolic_power_model.cumulative_equivalent_distance()
PlayerProperty(property=array([[ 323.49936949],
[ 375.05434763],
[2999.52658952],
[5386.4854768 ]]), name='cumulative_equivalent_distance', framerate=20)
References
----------
.. [1] `<NAME>., <NAME>. (2018). Metabolic power in team sports -
Part 1: An update. International Journal of Sports Medicine, 39(08),
581-587.
<https://www.thieme-connect.de/products/ejournals/abstract/10.1055/
a-0592-7660>`_
.. [2] `<NAME>., <NAME>. (2018). Update and extension of the
‘Equivalent Slope’ of speed changing level locomotion in humans: A
        computational model for shuttle running. Journal of Experimental Biology,
221:jeb.182303.
<https://journals.biologists.com/jeb/article/221/15/jeb182303/19414/Update-
and-extension-of-the-equivalent-slope-of>`_
"""
# Coefficient of air resistance from di Prampero (2018).
K = 0.0037
# Coefficients of polynomial to calculate the walk-run-transition
# velocity based on the equivalent slope from di Prampero (2018).
RUNNING_TRANSITION_COEFF = np.array((-107.05, 113.13, -1.13, -15.84, -1.7, 2.27))
# Cutoffs of equivalent slope for using the corresponding polynomial to calculate
# energy cost of walking at a certain velocity from di Prampero (2018).
ECW_ES_CUTOFFS = np.array([-0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4])
# Coefficients of polynomials to calculate energy cost of walking from di
# Prampero (2018).
ECW_POLY_COEFF = np.array(
[
[0.28, -1.66, 3.81, -3.96, 4.01],
[0.03, -0.15, 0.98, -2.25, 3.14],
[0.69, -3.21, 5.94, -5.07, 2.79],
[1.25, -6.57, 13.14, -11.15, 5.35],
[0.68, -4.17, 10.17, -10.31, 8.66],
[3.80, -14.91, 22.94, -14.53, 11.24],
[44.95, -122.88, 126.94, -57.46, 21.39],
[94.62, -213.94, 184.43, -68.49, 25.04],
]
)
def __init__(self):
super().__init__()
self._metabolic_power_ = None
@staticmethod
def _calc_es(vel, acc):
"""Calculates equivalent slope based on the formula by <NAME> & Osgnach
(2018)
Parameters
----------
vel: np.array
velocity
acc: np.array
acceleration
Returns
-------
es: np.array
equivalent slope
"""
es = (acc / g) + ((MetabolicPowerModel.K * np.square(vel)) / g)
return es
@staticmethod
def _calc_em(es):
"""Calculates equivalent mass based on the formula by <NAME> & Osgnach
(2018)
Parameters
----------
es: np.array
equivalent slope
Returns
-------
em: np.array
equivalent mass
"""
em = np.sqrt(np.square(es) + 1)
return em
@staticmethod
def _calc_v_trans(es: np.ndarray) -> np.ndarray:
"""Calculate the walking to running transition velocity at a certain equivalent
slope based on the formula of di Prampero (2018).
Parameters
----------
es: np.array
equivalent slope
Returns
-------
v_trans: np.array
Array with the respective transition velocity
"""
es_power = np.stack(
(
np.power(es, 5),
np.power(es, 4),
np.power(es, 3),
np.power(es, 2),
es,
np.ones(es.shape),
),
axis=-1,
)
v_trans = np.matmul(es_power, MetabolicPowerModel.RUNNING_TRANSITION_COEFF)
return v_trans
@staticmethod
def _is_running(vel: np.ndarray, es: np.ndarray) -> np.ndarray:
"""
Checks if athlete is walking or running based on the model of di Prampero
(2018).
Parameters
----------
vel: np.array
Velocity
es: np.array
Equivalent slope
Returns
-------
is_running: np.ndarray
Array containing boolean values indicating whether an athlete is running
(True) or not (False).
"""
# Calculate walk-run-transition velocity
v_trans = MetabolicPowerModel._calc_v_trans(es)
is_running = (vel >= v_trans) | (vel > 2.5)
return is_running
@staticmethod
def _get_interpolation_weight_matrix(es: np.ndarray) -> np.ndarray:
"""Calculates interpolation weight matrix. This matrix is designed for a
calculation of ECW in a single sweep by determining the interpolation weights
of all 8 ECW_ES_CUTOFFS for given ES values.
Parameters
----------
es: np.array
Equivalent slope
Returns
-------
W: np.array
Interpolation weight matrix of shape (T frames, N players,
len(ECW_ES_CUTOFFS)=8) containing interpolation coefficients from range
[0, 1].
"""
# Number of frames
T = es.shape[0]
# Number of players
N = es.shape[1]
# Pre-allocated interpolation weight matrix with 3 dimensions (T frames, N
# players, len(CUTOFFS)=8 polynomials)
W = np.zeros((T, N, len(MetabolicPowerModel.ECW_ES_CUTOFFS)))
# Index of each ES regarding its position in CUTOFFS.
# E.g. es = 0.25 -> es will be sorted between CUTOFFS[5] and CUTOFFS[6],
# idxs = 6
idxs = MetabolicPowerModel.ECW_ES_CUTOFFS.searchsorted(es)
# Mask for non-edge cases (es outside of CUTOFFS)
mask = (idxs > 0) & (idxs < 8)
# Initialize grids for appropriate indexing of W along axis=0 (time) and
# axis=1 (player)
grid_t, grid_n = np.mgrid[0:T, 0:N]
# Fill W with the right interpolation weights for each time t (axis=0),
# player n (axis=1) and polynomial (axis=2)
W[grid_t[mask], grid_n[mask], idxs[mask] - 1] = (
MetabolicPowerModel.ECW_ES_CUTOFFS[idxs[mask]] - es[mask]
) * 10
W[grid_t[mask], grid_n[mask], idxs[mask]] = (
es[mask] - MetabolicPowerModel.ECW_ES_CUTOFFS[idxs[mask] - 1]
) * 10
# Fill edge cases (es not in range of CUTOFFS) with 1 because they are
# calculated with the corresponding min/max CUTOFFS
W[idxs == 0, 0] = 1
W[idxs == 8, 7] = 1
return W
@staticmethod
def _calc_ecw(es: np.ndarray, vel: np.ndarray, em: np.ndarray) -> np.ndarray:
"""Calculates energy cost of walking based on formula (13), (14) and table
1 in di Prampero & Osgnach (2018).
Parameters
----------
es: np.array
Equivalent slope
vel: np.array
Velocity
em: np.array
Equivalent mass
Returns
-------
ECW: np.array
Energy cost of walking
"""
# Interpolation weight matrix
W = MetabolicPowerModel._get_interpolation_weight_matrix(es)
# Matrix product of ECW_ES_CUTOFFS and W, ie. weighted factors in polynomials
WC = np.matmul(W, MetabolicPowerModel.ECW_POLY_COEFF)
# Calculate vel^4 + vel^3 + vel^2 + vel + 1 for every frame and player
V = np.stack(
(
np.power(vel, 4),
np.power(vel, 3),
np.power(vel, 2),
vel,
np.ones(vel.shape),
),
axis=-1,
)
# Multiply WC and V. Calculate sum of terms. Multiply with em
ECW = np.multiply(np.multiply(WC, V).sum(axis=2), em)
return ECW
@staticmethod
def _calc_ecr(es: np.ndarray, em: np.ndarray, eccr: Numeric = 3.6) -> np.ndarray:
"""Calculates Energy cost of running based on formula (3) and (4) from
        Minetti & Pavei (2018).
Parameters
----------
es: np.array
Equivalent slope
em: np.array
Equivalent mass
eccr: Numeric
Energy cost of constant running. Default is set to 3.6
:math:`\\frac{J}{kg \\cdot m}` according to di Prampero (2018). Can differ
for different turfs.
Returns
-------
ecr: np.array
Energy cost of running
"""
# Cost of negative gradient from Minetti (2018)
def _cng(es: np.ndarray):
return -8.34 * es + eccr * np.exp(13 * es)
# Cost of positive gradient
def _cpg(es: np.ndarray):
return 39.5 * es + eccr * np.exp(-4 * es)
# Energy cost of running. Where es < 0 calculate cost of negative gradient.
# Where es >= 0 calculate cost of positive gradient.
ecr = np.piecewise(es, [es < 0, es >= 0], [_cng, _cpg]) * em
return ecr
@staticmethod
def _calc_ecl(
es: np.ndarray, vel: np.ndarray, em: np.ndarray, eccr: Numeric = 3.6
) -> np.ndarray:
"""Calculate Energy cost of locomotion.
Parameters
----------
es: np.array
Equivalent slope
vel: np.array
Velocity
em: np.array
Equivalent mass
eccr: Numeric
Energy cost of constant running. Default is set to 3.6
:math:`\\frac{J}{kg \\cdot m}` according to di Prampero (2018). Can differ
for different turfs.
Returns
-------
ecl: np.array
Energy cost of locomotion
"""
# Check where locomotion is running
running = MetabolicPowerModel._is_running(vel, es)
# Calculate energy cost of walking for entire array
ecl = MetabolicPowerModel._calc_ecw(es, vel, em)
# Substitute ecw with energy cost of running where locomotion is running
ecl[running] = MetabolicPowerModel._calc_ecr(es[running], em[running], eccr)
return ecl
@staticmethod
def _calc_metabolic_power(
es: np.ndarray,
vel: np.ndarray,
em: np.ndarray,
framerate: Numeric,
eccr: Numeric = 3.6,
) -> np.ndarray:
"""Calculates metabolic power as the product of energy cost of locomotion
and velocity.
Parameters
----------
es: np.array
Equivalent slope
vel: np.array
Velocity
em: np.array
Equivalent mass
eccr: Numeric
Energy cost of constant running. Default is set to 3.6
:math:`\\frac{J}{kg \\cdot m}` according to di Prampero (2018). Can differ
for different turfs.
Returns
-------
metp: np.array
Metabolic power
"""
# Calculate energy cost of locomotion
ecl = MetabolicPowerModel._calc_ecl(es, vel, em, eccr)
        # Calculate metabolic power as the product of ecl and velocity, divided by the framerate
metp = ecl * vel / framerate
return metp
def fit(
self, xy: XY, difference: str = "central", axis: str = None, eccr: Numeric = 3.6
):
"""Fit the model to the given data and calculate metabolic power for every
player.
Notes
-----
To give appropriate results, unit of coordinates must be in meter.
Parameters
----------
xy: XY
Floodlight XY Data object.
        difference: {'central', 'forward'}, optional
The method of differentiation to calculate velocity and acceleration.
See :func:`~floodlight.models.kinematics.VelocityModel` for further details.
axis: {None, 'x', 'y'}, optional
Optional argument that restricts distance calculation to either the x-
or y-dimension of the data. If set to None (default), distances are
calculated in both dimensions.
eccr: Numeric
Energy cost of constant running. Default is set to 3.6
:math:`\\frac{J}{kg \\cdot m}` according to di Prampero (2018). Can differ
for different turfs.
"""
# Velocity
velocity_model = VelocityModel()
velocity_model.fit(xy, difference=difference, axis=axis)
velocity = velocity_model.velocity()
# Acceleration
acceleration_model = AccelerationModel()
acceleration_model.fit(xy, difference=difference, axis=axis)
acceleration = acceleration_model.acceleration()
# Equivalent slope
equivalent_slope = MetabolicPowerModel._calc_es(
velocity.property, acceleration.property
)
# Equivalent mass
equivalent_mass = MetabolicPowerModel._calc_em(equivalent_slope)
# Metabolic power
metabolic_power = MetabolicPowerModel._calc_metabolic_power(
equivalent_slope, velocity.property, equivalent_mass, xy.framerate, eccr
)
self._metabolic_power_ = PlayerProperty(
property=metabolic_power,
name="metabolic_power",
framerate=xy.framerate,
)
@requires_fit
def metabolic_power(self) -> PlayerProperty:
"""Returns the frame-wise metabolic power as computed by the ``fit()``-method.
Returns
-------
metabolic_power: PlayerProperty
A Player Property object of shape (T, N), where T is the total number of
frames and N is the number of players. The columns contain the frame-wise
metabolic power.
"""
metabolic_power = self._metabolic_power_
return metabolic_power
@requires_fit
def cumulative_metabolic_power(self) -> PlayerProperty:
"""Returns the cumulative metabolic power.
Returns
-------
metabolic_power: PlayerProperty
A Player Property object of shape (T, N), where T is the total number of
frames and N is the number of players. The columns contain the cumulative
metabolic power calculated by numpy.nancumsum() over axis=0.
"""
cum_metp = np.nancumsum(self._metabolic_power_.property, axis=0)
cumulative_metabolic_power = PlayerProperty(
property=cum_metp,
name="cumulative_metabolic_power",
framerate=self._metabolic_power_.framerate,
)
return cumulative_metabolic_power
@requires_fit
def equivalent_distance(self, eccr: Numeric = 3.6) -> PlayerProperty:
"""Returns frame-wise equivalent distance, defined as the distance a player
could have run if moving at a constant speed and calculated as the fraction of
metabolic work and the cost of constant running.
Parameters
----------
eccr: Numeric
Energy cost of constant running. Default is set to 3.6
:math:`\\frac{J}{kg \\cdot m}` according to di Prampero (2018). Can differ
for different turfs.
Returns
-------
equivalent_distance: PlayerProperty
A Player Property object of shape (T, N), where T is the total number of
frames and N is the number of players. The columns contain the frame-wise
equivalent distance.
"""
        eq_dist = self._metabolic_power_.property / eccr
        equivalent_distance = PlayerProperty(
            property=eq_dist,
            name="equivalent_distance",
            framerate=self._metabolic_power_.framerate,
        )
        return equivalent_distance
@requires_fit
def cumulative_equivalent_distance(self, eccr: Numeric = 3.6) -> PlayerProperty:
"""Returns cumulative equivalent distance defined as the distance a player
could have run if moving at a constant speed and calculated as the fraction
of metabolic work and the cost of constant running.
Parameters
----------
eccr: Numeric
Energy cost of constant running. Default is set to 3.6
:math:`\\frac{J}{kg \\cdot m}` according to di Prampero (2018). Can differ
for different turfs.
Returns
-------
cumulative_equivalent_distance: PlayerProperty
A Player Property object of shape (T, N), where T is the total number of
frames and N is the number of players. The columns contain the cumulative
equivalent distance calculated by numpy.nancumsum() over axis=0.
"""
cum_metp = np.nancumsum(self._metabolic_power_.property, axis=0)
cum_eqdist = cum_metp / eccr
cumulative_equivalent_distance = PlayerProperty(
property=cum_eqdist,
name="cumulative_equivalent_distance",
framerate=self._metabolic_power_.framerate,
)
return cumulative_equivalent_distance
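# Hedged usage sketch mirroring the docstring example above; the synthetic XY data,
# framerate and variable names are illustrative only and not part of the original module.
if __name__ == "__main__":
    example_xy = XY(np.array(((0, 0), (0, 1), (1, 1), (2, 2))), framerate=20)
    example_model = MetabolicPowerModel()
    example_model.fit(example_xy)
    # Frame-wise metabolic power and cumulative equivalent distance per player
    print(example_model.metabolic_power().property)
    print(example_model.cumulative_equivalent_distance().property)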
|
1628170
|
class Node:
"""A binary tree node"""
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def morris_traversal(root):
"""Generator function for iterative inorder tree traversal"""
current = root
while current is not None:
if current.left is None:
yield current.data
current = current.right
else:
# Find the inorder predecessor of current
pre = current.left
while pre.right is not None and pre.right is not current:
pre = pre.right
if pre.right is None:
# Make current as right child of its inorder predecessor
pre.right = current
current = current.left
else:
# Revert the changes made in the 'if' part to restore the
# original tree. i.e., fix the right child of predecessor
pre.right = None
yield current.data
current = current.right
# Driver program to test the above function
root = Node(1,
right = Node(3),
left = Node(2,
left = Node(4),
right = Node(5)
)
)
for v in morris_traversal(root):
print(v, end=' ')
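# Hedged check (not part of the original driver): a plain recursive inorder traversal
# should produce the same order, and because Morris traversal restores the threaded
# links it creates, the tree can safely be traversed a second time.
def recursive_inorder(node):
    if node is None:
        return []
    return recursive_inorder(node.left) + [node.data] + recursive_inorder(node.right)

print()
assert list(morris_traversal(root)) == recursive_inorder(root)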
|
1628172
|
from abc import ABCMeta
import numpy as np
import torch
import torch.nn as nn
from torch.nn.modules.batchnorm import _BatchNorm
from mmcv.cnn import normal_init, constant_init
from core.gdrn_selfocc_modeling.tools.layers.layer_utils import resize
from core.gdrn_selfocc_modeling.tools.layers.conv_module import ConvModule
class BaseDecodeHead(nn.Module, metaclass=ABCMeta):
"""Base class for BaseDecodeHead.
Args:
in_channels (int|Sequence[int]): Input channels.
channels (int): Channels after modules, before conv_seg.
num_classes (int): Number of classes.
dropout_ratio (float): Ratio of dropout layer. Default: 0.1.
conv_cfg (dict|None): Config of conv layers. Default: None.
in_index (int|Sequence[int]): Input feature index. Default: -1
input_transform (str|None): Transformation type of input features.
Options: 'resize_concat', 'multiple_select', None.
            'resize_concat': Multiple feature maps will be resized to the
                same size as the first one and then concatenated together.
                Usually used in FCN head of HRNet.
            'multiple_select': Multiple feature maps will be bundled into
                a list and passed into the decode head.
None: Only one select feature map is allowed.
Default: None.
ignore_index (int | None): The label index to be ignored. When using
masked BCE loss, ignore_index should be set to None. Default: 255
sampler (dict|None): The config of segmentation map sampler.
Default: None.
align_corners (bool): align_corners argument of F.interpolate.
Default: False.
"""
def __init__(
self,
in_channels,
channels,
*,
dropout_ratio=0.1,
conv_cfg=None,
norm=None,
act="relu",
in_index=-1,
input_transform=None,
align_corners=False,
):
super(BaseDecodeHead, self).__init__()
self._init_inputs(in_channels, in_index, input_transform)
self.channels = channels
self.dropout_ratio = dropout_ratio
self.conv_cfg = conv_cfg
self.norm = norm
self.act = act
self.in_index = in_index
self.align_corners = align_corners
if dropout_ratio > 0:
self.dropout = nn.Dropout2d(dropout_ratio)
else:
self.dropout = None
self.fp16_enabled = False
def extra_repr(self):
"""Extra repr."""
s = f"input_transform={self.input_transform}, " f"align_corners={self.align_corners}"
return s
def _init_inputs(self, in_channels, in_index, input_transform):
"""Check and initialize input transforms.
The in_channels, in_index and input_transform must match.
Specifically, when input_transform is None, only single feature map
will be selected. So in_channels and in_index must be of type int.
        When input_transform is not None, in_channels and in_index must be
        list or tuple, with the same length.
Args:
in_channels (int|Sequence[int]): Input channels.
in_index (int|Sequence[int]): Input feature index.
input_transform (str|None): Transformation type of input features.
Options: 'resize_concat', 'multiple_select', None.
                'resize_concat': Multiple feature maps will be resized to the
                    same size as the first one and then concatenated together.
                    Usually used in FCN head of HRNet.
                'multiple_select': Multiple feature maps will be bundled into
                    a list and passed into the decode head.
None: Only one select feature map is allowed.
"""
if input_transform is not None:
assert input_transform in ["resize_concat", "multiple_select"]
self.input_transform = input_transform
self.in_index = in_index
if input_transform is not None:
assert isinstance(in_channels, (list, tuple))
assert isinstance(in_index, (list, tuple))
assert len(in_channels) == len(in_index)
if input_transform == "resize_concat":
self.in_channels = sum(in_channels)
else:
self.in_channels = in_channels
else:
assert isinstance(in_channels, int)
assert isinstance(in_index, int)
self.in_channels = in_channels
def _transform_inputs(self, inputs):
"""Transform inputs for decoder.
Args:
inputs (list[Tensor]): List of multi-level img features.
Returns:
Tensor: The transformed inputs
"""
if self.input_transform == "resize_concat":
inputs = [inputs[i] for i in self.in_index]
upsampled_inputs = [
resize(input=x, size=inputs[0].shape[2:], mode="bilinear", align_corners=self.align_corners)
for x in inputs
]
inputs = torch.cat(upsampled_inputs, dim=1)
elif self.input_transform == "multiple_select":
inputs = [inputs[i] for i in self.in_index]
else:
inputs = inputs[self.in_index]
return inputs
class FPNMaskXyzRegionHead(BaseDecodeHead):
"""Panoptic Feature Pyramid Networks. This head is the implementation of
`Semantic FPN.
<https://arxiv.org/abs/1901.02446>`_.
Args:
feature_strides (tuple[int]): The strides for input feature maps.
stack_lateral. All strides suppose to be power of 2. The first
one is of largest resolution.
"""
def __init__(
self,
feature_strides,
out_kernel_size=1,
out_layer_shared=True,
mask_num_classes=1,
xyz_num_classes=1,
region_num_classes=1,
mask_out_dim=1,
xyz_out_dim=3,
region_out_dim=65, # 64+1,
**kwargs,
):
super().__init__(input_transform="multiple_select", **kwargs)
assert len(feature_strides) == len(self.in_channels)
assert min(feature_strides) == feature_strides[0]
self.feature_strides = feature_strides
self.scale_heads = nn.ModuleList()
for i in range(len(feature_strides)):
head_length = max(1, int(np.log2(feature_strides[i]) - np.log2(feature_strides[0])))
scale_head = []
for k in range(head_length):
scale_head.append(
ConvModule(
self.in_channels[i] if k == 0 else self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm=self.norm,
act=self.act,
)
)
if feature_strides[i] != feature_strides[0]:
scale_head.append(nn.Upsample(scale_factor=2, mode="bilinear", align_corners=self.align_corners))
self.scale_heads.append(nn.Sequential(*scale_head))
self.out_layer_shared = out_layer_shared
self.mask_num_classes = mask_num_classes
self.xyz_num_classes = xyz_num_classes
self.region_num_classes = region_num_classes
self.mask_out_dim = mask_out_dim
self.xyz_out_dim = xyz_out_dim
self.region_out_dim = region_out_dim
_in_dim = self.channels
if self.out_layer_shared:
out_dim = (
self.mask_out_dim * self.mask_num_classes
+ self.xyz_out_dim * self.xyz_num_classes
+ self.region_out_dim * self.region_num_classes
)
self.out_layer = nn.Conv2d(
_in_dim,
out_dim,
kernel_size=out_kernel_size,
padding=(out_kernel_size - 1) // 2,
bias=True,
)
else:
self.mask_out_layer = nn.Conv2d(
_in_dim,
self.mask_out_dim * self.mask_num_classes,
kernel_size=out_kernel_size,
padding=(out_kernel_size - 1) // 2,
bias=True,
)
self.xyz_out_layer = nn.Conv2d(
_in_dim,
self.xyz_out_dim * self.xyz_num_classes,
kernel_size=out_kernel_size,
padding=(out_kernel_size - 1) // 2,
bias=True,
)
self.region_out_layer = nn.Conv2d(
_in_dim,
self.region_out_dim * self.region_num_classes,
kernel_size=out_kernel_size,
padding=(out_kernel_size - 1) // 2,
bias=True,
)
# init
for m in self.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, std=0.001)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
elif isinstance(m, nn.ConvTranspose2d):
normal_init(m, std=0.001)
# init output layers
if self.out_layer_shared:
normal_init(self.out_layer, std=0.01)
else:
normal_init(self.mask_out_layer, std=0.01)
normal_init(self.xyz_out_layer, std=0.01)
normal_init(self.region_out_layer, std=0.01)
def forward(self, inputs):
x = self._transform_inputs(inputs) # strides: [4, 8, 16, 32]
output = self.scale_heads[0](x[0])
for i in range(1, len(self.feature_strides)):
# non inplace
output = output + resize(
self.scale_heads[i](x[i]), size=output.shape[2:], mode="bilinear", align_corners=self.align_corners
)
output = self.get_output(output)
return output
def get_output(self, x):
if self.dropout is not None:
x = self.dropout(x)
if self.out_layer_shared:
out = self.out_layer(x)
mask_dim = self.mask_out_dim * self.mask_num_classes
mask = out[:, :mask_dim, :, :]
xyz_dim = self.xyz_out_dim * self.xyz_num_classes
xyz = out[:, mask_dim : mask_dim + xyz_dim, :, :]
region = out[:, mask_dim + xyz_dim :, :, :]
bs, c, h, w = xyz.shape
xyz = xyz.view(bs, 3, xyz_dim // 3, h, w)
coor_x = xyz[:, 0, :, :, :]
coor_y = xyz[:, 1, :, :, :]
coor_z = xyz[:, 2, :, :, :]
else:
mask = self.mask_out_layer(x)
xyz = self.xyz_out_layer(x)
bs, c, h, w = xyz.shape
xyz = xyz.view(bs, 3, c // 3, h, w)
coor_x = xyz[:, 0, :, :, :]
coor_y = xyz[:, 1, :, :, :]
coor_z = xyz[:, 2, :, :, :]
region = self.region_out_layer(x)
return mask, coor_x, coor_y, coor_z, region
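# Hedged stand-alone sketch (illustrative dimensions, not part of the original module):
# how the shared output tensor is split channel-wise into mask, xyz and region
# predictions, mirroring get_output() above with plain torch tensors.
if __name__ == "__main__":
    bs, h, w = 2, 64, 64
    mask_dim, xyz_dim, region_dim = 1, 3, 65
    out = torch.randn(bs, mask_dim + xyz_dim + region_dim, h, w)
    mask = out[:, :mask_dim]
    xyz = out[:, mask_dim : mask_dim + xyz_dim].view(bs, 3, xyz_dim // 3, h, w)
    region = out[:, mask_dim + xyz_dim :]
    print(mask.shape, xyz.shape, region.shape)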
|
1628186
|
import random
import string
import pytest
from django.core.files import File
def randomword(length=5):
letters = string.ascii_lowercase
return "".join(random.choice(letters) for i in range(length))
@pytest.fixture()
def tempFile() -> File:
from django.core.files.uploadedfile import SimpleUploadedFile
return SimpleUploadedFile(f"filename-{randomword()}.pdf", b"file-contents") # must be bytestring
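# Hedged usage sketch (hypothetical test, not part of the original module): how a
# test could consume the fixture above.
def test_temp_file_fixture(tempFile):
    assert tempFile.name.startswith("filename-")
    assert tempFile.name.endswith(".pdf")
    assert tempFile.read() == b"file-contents"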
|
1628219
|
from discord.ext import commands
from discord_components import Button
from discord_slash import SlashContext, cog_ext, ButtonStyle
import discord
import random
from External_functions import cembed, equalise
from stuff import req, re
class Polls(commands.Cog):
def __init__(self, bot):
self.bot = bot
@cog_ext.cog_slash(name="polling", description="Seperate options with |")
async def polling_slash(self, ctx, question, channel: discord.TextChannel, options):
await ctx.defer()
await self.poll(ctx, options, channel, question=question)
@commands.command()
async def poll(self, ctx, options, channel_to_send: discord.TextChannel = None, *, question):
count = {}
req()
author_list = {}
names = {}
        channel = channel_to_send
        if isinstance(channel_to_send, str):
            # A bare string means no channel was passed, so the word belongs to the question
            channel = ctx.channel
            question = channel_to_send + question
        if channel is None or ctx.guild.id == 858955930431258624:
            channel = ctx.channel
options = options.replace("_", " ").split("|")
components = []
for i in options:
components.append(
Button(style=random.choice([ButtonStyle.green, ButtonStyle.blue]), label=i)
)
count[i] = 0
await ctx.send("Done")
mess = await channel.send(
embed=cembed(
title=f"Poll from {ctx.author.name}",
description=f"```yaml\n{question}```",
color=re[8],
thumbnail=self.bot.user.avatar_url_as(format="png"),
),
components=[components],
)
def check(res):
return mess.id == res.message.id
while True:
res = await self.bot.wait_for("button_click", check=check)
if res.component.label in count and res.author.id not in author_list:
author_list[res.author.id] = res.component.label
count[res.component.label] += 1
else:
count[author_list[res.author.id]] -= 1
count[res.component.label] += 1
author_list[res.author.id] = res.component.label
description = question + "\n\n"
avg = sum(list(count.values())) // len(options)
avg = 1 if avg == 0 else avg
copy_count = equalise(list(count.keys()))
for i in list(count.keys()):
description += f"{copy_count[i]} |" + chr(9606) * (count[i] // avg) + "\n"
_ = [
names.update({i: self.bot.get_user(i).name})
for i in author_list
if i not in names
]
people = "\n" + "\n".join([names[i] for i in author_list])
st = "\n"
for i in list(count.keys()):
st += f"{copy_count[i]}: {(count[i] * 100) // len(author_list)}%\n"
people = st + "\n" + people
await res.edit_origin(
embed=cembed(
title=f"Poll from {ctx.author.name}",
description=f"```yaml\n{description}```" + "\n" + people,
color=re[8],
thumbnail=self.bot.user.avatar_url_as(format="png"),
)
)
def setup(bot):
bot.add_cog(Polls(bot))
|
1628340
|
import warnings
warnings.filterwarnings('ignore', category=UserWarning)
from numpy.testing import assert_equal, assert_almost_equal
import os
import sys
import numpy as np
import skvideo.io
import skvideo.datasets
import skvideo.measure
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
#TODO: Check blas implementation, then check numerical accuracy.
# The required inverse operation in ST-RRED differs across
# blas implementations
def test_scenedet():
vidpath = skvideo.datasets.bikes()
vid = skvideo.io.vread(vidpath)
boundaries = skvideo.measure.scenedet(vid)
correct_boundaries = [0, 30, 76, 137, 187, 242]
assert len(correct_boundaries) == len(boundaries), "Scene detection failed."
error = np.sum(np.abs(correct_boundaries - boundaries))
assert error < 1e-9, "Wrong boundaries detected"
|
1628349
|
from wharfee.__init__ import __version__
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
description='Wharfee: a shell for Docker',
author='<NAME>',
url='http://wharfee.com',
download_url='http://github.com/j-bennet/wharfee',
author_email='i[dot]chernyavska[at]gmail[dot]com',
version=__version__,
license='LICENSE.txt',
install_requires=[
'six>=1.9.0',
'pygments>=2.0.2',
'prompt_toolkit>=1.0.0,<1.1.0',
'docker-py>=1.6.0',
'tabulate>=0.7.5',
'click>=4.0',
'py-pretty>=0.1',
'configobj>=5.0.6',
'pexpect>=3.3',
'fuzzyfinder>=1.0.0',
'ruamel.yaml>=0.15.72',
],
extras_require={
'testing': [
'pytest>=2.7.0',
'mock>=1.0.1',
'tox>=1.9.2'
],
},
entry_points={
'console_scripts': [
'wharfee = wharfee.main:cli',
'wharfee-ops = scripts.optionizer:main',
]
},
packages=['wharfee'],
package_data={'wharfee': ['wharfeerc']},
scripts=[],
name='wharfee',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
1628483
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
import pynab.ids
from pynab.db import db_session, MetaBlack
def local_postprocess():
with db_session() as db:
# noinspection PyComparisonWithNone,PyComparisonWithNone
db.query(MetaBlack).filter(MetaBlack.status=='IMPOSSIBLE').filter((MetaBlack.movie!=None)|(MetaBlack.tvshow!=None)).delete(synchronize_session=False)
pynab.ids.process('movie', online=False)
pynab.ids.process('tv', online=False)
if __name__ == '__main__':
print('This script will attempt to post-process releases against local databases.')
print('After importing or collecting a large batch of releases, you can run this once prior to start.py.')
print('This will check all local matches first, leaving start.py to just do remote matching.')
print('It\'ll really just save some time.')
print()
input('To continue, press enter. To exit, press ctrl-c.')
local_postprocess()
|
1628490
|
import image_embeddings.downloader
import image_embeddings.inference
import image_embeddings.knn
import image_embeddings.cli
|
1628506
|
import unittest
from pyfinder import Crawler
class TestCrawler(unittest.TestCase):
def setUp(self):
self.crawler = Crawler()
self.n = 10
def test_crawl(self):
self.crawler.run()
@unittest.skip("Skipping test_build")
def test_build_test(self):
images = self.crawler.build_test(num_images_test=self.n)
self.assertEqual(len(images), self.n)
@unittest.skip("Skipping test_load test")
def test_load_test(self):
load = self.crawler.load_test_images()
self.assertEqual(self.n, len(load))
if __name__ == '__main__':
unittest.main()
|
1628581
|
import logging
import unittest
import requests
from configcatclient import DataGovernance
try:
from unittest import mock
except ImportError:
import mock
try:
from unittest.mock import Mock, ANY
except ImportError:
from mock import Mock, ANY
from configcatclient.configfetcher import ConfigFetcher
logging.basicConfig(level=logging.WARN)
test_json = {"test": "json"}
class MockHeader:
def __init__(self, etag):
self.etag = etag
def get(self, name):
if name == 'Etag':
return self.etag
return None
class MockResponse:
def __init__(self, json_data, status_code, etag=None):
self.json_data = json_data
self.status_code = status_code
self.headers = MockHeader(etag)
def json(self):
return self.json_data
def raise_for_status(self):
if 200 <= self.status_code < 300 or self.status_code == 304:
return
raise Exception(self.status_code)
# An organization with Global data_governance config.json representation
def mocked_requests_get_global(*args, **kwargs):
if args[0] == 'https://cdn-global.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://cdn-global.configcat.com",
"r": 0
},
"f": test_json
}, 200)
elif args[0] == 'https://cdn-eu.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://cdn-global.configcat.com",
"r": 0
},
"f": test_json
}, 200)
return MockResponse(None, 404)
# An organization with EuOnly data_governance config.json representation
def mocked_requests_get_eu_only(*args, **kwargs):
if args[0] == 'https://cdn-global.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://cdn-eu.configcat.com",
"r": 1
},
"f": {}
}, 200)
elif args[0] == 'https://cdn-eu.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://cdn-eu.configcat.com",
"r": 0
},
"f": test_json
}, 200)
return MockResponse(None, 404)
# An organization with Global data_governance config.json representation with custom baseurl
def mocked_requests_get_custom(*args, **kwargs):
if args[0] == 'https://custom.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://cdn-global.configcat.com",
"r": 0
},
"f": test_json
}, 200)
return MockResponse(None, 404)
# Redirect loop in config.json
def mocked_requests_get_redirect_loop(*args, **kwargs):
if args[0] == 'https://cdn-global.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://cdn-eu.configcat.com",
"r": 1
},
"f": test_json
}, 200)
elif args[0] == 'https://cdn-eu.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://cdn-global.configcat.com",
"r": 1
},
"f": test_json
}, 200)
return MockResponse(None, 404)
# An organization with forced=2 redirection config.json representation
def mocked_requests_get_forced_2(*args, **kwargs):
if args[0] == 'https://custom.configcat.com/configuration-files//config_v5.json' \
or args[0] == 'https://cdn-global.configcat.com/configuration-files//config_v5.json' \
or args[0] == 'https://cdn-eu.configcat.com/configuration-files//config_v5.json'\
or args[0] == 'https://forced.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://forced.configcat.com",
"r": 2
},
"f": test_json
}, 200)
return MockResponse(None, 404)
call_to_global = mock.call('https://cdn-global.configcat.com/configuration-files//config_v5.json',
auth=ANY, headers=ANY, proxies=ANY, timeout=ANY)
call_to_eu = mock.call('https://cdn-eu.configcat.com/configuration-files//config_v5.json',
auth=ANY, headers=ANY, proxies=ANY, timeout=ANY)
call_to_custom = mock.call('https://custom.configcat.com/configuration-files//config_v5.json',
auth=ANY, headers=ANY, proxies=ANY, timeout=ANY)
call_to_forced = mock.call('https://forced.configcat.com/configuration-files//config_v5.json',
auth=ANY, headers=ANY, proxies=ANY, timeout=ANY)
class DataGovernanceTests(unittest.TestCase):
@mock.patch('requests.get', side_effect=mocked_requests_get_global)
def test_sdk_global_organization_global(self, mock_get):
# In this case
# the first invocation should call https://cdn-global.configcat.com
# and the second should call https://cdn-global.configcat.com
# without force redirects
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.Global)
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 1)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertEqual(call_to_global, mock_get.call_args_list[1])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
@mock.patch('requests.get', side_effect=mocked_requests_get_global)
def test_sdk_eu_organization_global(self, mock_get):
# In this case
# the first invocation should call https://cdn-eu.configcat.com
# and the second should call https://cdn-global.configcat.com
# without force redirects
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.EuOnly)
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 1)
self.assertEqual(call_to_eu, mock_get.call_args_list[0])
self.assertNotIn(call_to_global, mock_get.call_args_list)
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_eu, mock_get.call_args_list[0])
self.assertEqual(call_to_global, mock_get.call_args_list[1])
@mock.patch('requests.get', side_effect=mocked_requests_get_eu_only)
def test_sdk_global_organization_eu_only(self, mock_get):
# In this case
# the first invocation should call https://cdn-global.configcat.com
# with an immediate redirect to https://cdn-eu.configcat.com
# and the second should call https://cdn-eu.configcat.com
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.Global)
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertEqual(call_to_eu, mock_get.call_args_list[1])
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 3)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertEqual(call_to_eu, mock_get.call_args_list[1])
self.assertEqual(call_to_eu, mock_get.call_args_list[2])
@mock.patch('requests.get', side_effect=mocked_requests_get_eu_only)
def test_sdk_eu_organization_eu_only(self, mock_get):
# In this case
# the first invocation should call https://cdn-eu.configcat.com
# and the second should call https://cdn-eu.configcat.com
# without redirects
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.EuOnly)
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 1)
self.assertEqual(call_to_eu, mock_get.call_args_list[0])
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_eu, mock_get.call_args_list[0])
self.assertEqual(call_to_eu, mock_get.call_args_list[1])
@mock.patch('requests.get', side_effect=mocked_requests_get_custom)
def test_sdk_global_custom_base_url(self, mock_get):
# In this case
# the first invocation should call https://custom.configcat.com
# and the second should call https://custom.configcat.com
# without force redirects
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.Global,
base_url='https://custom.configcat.com')
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 1)
self.assertEqual(call_to_custom, mock_get.call_args_list[0])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
self.assertNotIn(call_to_global, mock_get.call_args_list)
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_custom, mock_get.call_args_list[0])
self.assertEqual(call_to_custom, mock_get.call_args_list[1])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
self.assertNotIn(call_to_global, mock_get.call_args_list)
@mock.patch('requests.get', side_effect=mocked_requests_get_custom)
def test_sdk_eu_custom_base_url(self, mock_get):
# In this case
# the first invocation should call https://custom.configcat.com
# and the second should call https://custom.configcat.com
# without force redirects
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.EuOnly,
base_url='https://custom.configcat.com')
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 1)
self.assertEqual(call_to_custom, mock_get.call_args_list[0])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
self.assertNotIn(call_to_global, mock_get.call_args_list)
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_custom, mock_get.call_args_list[0])
self.assertEqual(call_to_custom, mock_get.call_args_list[1])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
self.assertNotIn(call_to_global, mock_get.call_args_list)
@mock.patch('requests.get', side_effect=mocked_requests_get_forced_2)
def test_sdk_global_forced(self, mock_get):
# In this case
# the first invocation should call https://cdn-global.configcat.com
# with an immediate redirect to https://forced.configcat.com
# and the second should call https://forced.configcat.com
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.Global)
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertEqual(call_to_forced, mock_get.call_args_list[1])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 3)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertEqual(call_to_forced, mock_get.call_args_list[1])
self.assertEqual(call_to_forced, mock_get.call_args_list[2])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
@mock.patch('requests.get', side_effect=mocked_requests_get_forced_2)
def test_sdk_eu_forced(self, mock_get):
# In this case
# the first invocation should call https://cdn-eu.configcat.com
# with an immediate redirect to https://forced.configcat.com
# and the second should call https://forced.configcat.com
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.EuOnly)
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_eu, mock_get.call_args_list[0])
self.assertEqual(call_to_forced, mock_get.call_args_list[1])
self.assertNotIn(call_to_global, mock_get.call_args_list)
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 3)
self.assertEqual(call_to_eu, mock_get.call_args_list[0])
self.assertEqual(call_to_forced, mock_get.call_args_list[1])
self.assertEqual(call_to_forced, mock_get.call_args_list[2])
self.assertNotIn(call_to_global, mock_get.call_args_list)
@mock.patch('requests.get', side_effect=mocked_requests_get_forced_2)
def test_sdk_base_url_forced(self, mock_get):
# In this case
# the first invocation should call https://cdn-eu.configcat.com
# with an immediate redirect to https://forced.configcat.com
# and the second should call https://forced.configcat.com
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.Global,
base_url='https://custom.configcat.com')
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_custom, mock_get.call_args_list[0])
self.assertEqual(call_to_forced, mock_get.call_args_list[1])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
self.assertNotIn(call_to_global, mock_get.call_args_list)
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 3)
self.assertEqual(call_to_custom, mock_get.call_args_list[0])
self.assertEqual(call_to_forced, mock_get.call_args_list[1])
self.assertEqual(call_to_forced, mock_get.call_args_list[2])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
self.assertNotIn(call_to_global, mock_get.call_args_list)
@mock.patch('requests.get', side_effect=mocked_requests_get_redirect_loop)
def test_sdk_redirect_loop(self, mock_get):
# In this case
# the first invocation should call https://cdn-global.configcat.com
# with an immediate redirect to https://cdn-eu.configcat.com
# with an immediate redirect to https://cdn-global.configcat.com
# the second invocation should call https://cdn-eu.configcat.com
# with an immediate redirect to https://cdn-global.configcat.com
# with an immediate redirect to https://cdn-eu.configcat.com
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.Global)
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 3)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertEqual(call_to_eu, mock_get.call_args_list[1])
self.assertEqual(call_to_global, mock_get.call_args_list[2])
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 6)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertEqual(call_to_eu, mock_get.call_args_list[1])
self.assertEqual(call_to_global, mock_get.call_args_list[2])
self.assertEqual(call_to_eu, mock_get.call_args_list[3])
self.assertEqual(call_to_global, mock_get.call_args_list[4])
self.assertEqual(call_to_eu, mock_get.call_args_list[5])
|
1628593
|
from bs4 import BeautifulSoup
import requests
choice = {
'1': 'english',
'2': 'hindi',
'3': 'punjabi',
'4': 'bengali',
'5': 'gujarati'
}
ch = input('Enter your choice (1-5) : ')
res = requests.get('https://www.saavn.com/s/featured/' + choice[ch] + '/Weekly_Top_Songs')
soup = BeautifulSoup(res.text, 'lxml')
data = soup.find('ol', {'class': 'content-list'})
all_songs = data.find_all('div', {'class': 'details'})
for count, s in enumerate(all_songs, 1):
song = s.find('p', {'class': 'song-name'})
print(count, song.text)
|
1628680
|
import re
import sys
from collections import namedtuple
import pytest
import mock_autogen.generator
import tests.sample.code.tested_module
import tests.sample.code.second_module
from tests.sample.code.comprehensions_and_loops import get_square_root, \
summarize_environ_values, trimmed_strings, \
get_square_root_external_variable
from tests.sample.code.same_method_name import get_username_and_password
from tests.sample.code.subscripts import list_subscript_games
import tests.sample.code.with_statements as with_statements
MOCKED_MODULES_HEADER = "# mocked modules\n"
MOCKED_MODULES = "mock_os = mocker.MagicMock(name='os')\n" \
"mocker.patch('tests.sample.code.tested_module.os', " \
"new=mock_os)\n" \
"mock_random = mocker.MagicMock(name='random')\n" \
"mocker.patch('tests.sample.code.tested_module.random', " \
"new=mock_random)\n" \
"mock_second_module = " \
"mocker.MagicMock(name='second_module')\n" \
"mocker.patch('tests.sample.code.tested_module." \
"second_module', new=mock_second_module)\n" \
"mock_zipfile = mocker.MagicMock(name='zipfile')\n" \
"mocker.patch('tests.sample.code.tested_module.zipfile', " \
"new=mock_zipfile)\n"
MOCKED_DEPENDENCIES_HEADER = "# mocked dependencies\n"
MOCKED_FUNCTIONS_HEADER = "# mocked functions\n"
MOCKED_FUNCTIONS = "mock_add = mocker.MagicMock(name='add')\n" \
"mocker.patch('tests.sample.code.tested_module.add', " \
"new=mock_add)\n" \
"mock_append_to_cwd = " \
"mocker.MagicMock(name='append_to_cwd')\n" \
"mocker.patch('tests.sample.code.tested_module." \
"append_to_cwd', new=mock_append_to_cwd)\n" \
"mock_are_in_same_folder = " \
"mocker.MagicMock(name='are_in_same_folder')\n" \
"mocker.patch('tests.sample.code.tested_module." \
"are_in_same_folder', new=mock_are_in_same_folder)\n" \
"mock_base_64_partial_functions = " \
"mocker.MagicMock(name='base_64_partial_functions')\n" \
"mocker.patch('tests.sample.code." \
"tested_module.base_64_partial_functions', " \
"new=mock_base_64_partial_functions)\n" \
"mock_base_64_whole_modules = " \
"mocker.MagicMock(name='base_64_whole_modules')\n" \
"mocker.patch('tests.sample.code." \
"tested_module.base_64_whole_modules', " \
"new=mock_base_64_whole_modules)\n" \
"mock_get_current_time = " \
"mocker.MagicMock(name='get_current_time')\n" \
"mocker.patch('tests.sample.code.tested_module." \
"get_current_time', new=mock_get_current_time)\n" \
"mock_get_random_number = mocker.MagicMock" \
"(name='get_random_number')\n" \
"mocker.patch('tests.sample.code.tested_module." \
"get_random_number', new=mock_get_random_number)\n" \
"mock_os_remove_wrap = mocker.MagicMock(" \
"name='os_remove_wrap')\n" \
"mocker.patch('tests.sample.code.tested_module." \
"os_remove_wrap', new=mock_os_remove_wrap)\n" \
"mock_other_dir = mocker.MagicMock(name='other_dir')\n" \
"mocker.patch('tests.sample.code.tested_module." \
"other_dir', new=mock_other_dir)\n" \
"mock_process_and_zip = mocker.MagicMock(" \
"name='process_and_zip')\n" \
"mocker.patch('tests.sample.code.tested_module." \
"process_and_zip', new=mock_process_and_zip)\n" \
"mock_rm_alias = mocker.MagicMock(name='rm_alias')\n" \
"mocker.patch('tests.sample.code.tested_module." \
"rm_alias', new=mock_rm_alias)\n" \
"mock_second_dir = mocker.MagicMock(name='second_dir')\n" \
"mocker.patch('tests.sample.code.tested_module." \
"second_dir', new=mock_second_dir)\n" \
"mock_use_first_class = mocker.MagicMock(" \
"name='use_first_class')\n" \
"mocker.patch('tests.sample.code.tested_module." \
"use_first_class', new=mock_use_first_class)\n" \
"mock_use_second_class_static = mocker.MagicMock(" \
"name='use_second_class_static')\n" \
"mocker.patch('tests.sample.code.tested_module." \
"use_second_class_static', " \
"new=mock_use_second_class_static)\n"
MOCKED_BUILTIN = "mock_os_remove = mocker.MagicMock(name='os_remove')\n" \
"mocker.patch('tests.sample.code.tested_module.os_remove', " \
"new=mock_os_remove)\n"
MOCKED_METHODS_HEADER = "# mocked methods\n"
MOCKED_METHODS = "mocker.patch.object(first, 'increase_class_counter')\n" \
"mocker.patch.object(first, 'increase_global_counter')\n" \
"mocker.patch.object(first, 'not_implemented')\n" \
"mocker.patch.object(first, 'using_not_implemented')\n"
MOCKED_CLASSES_HEADER = "# mocked classes\n"
MOCKED_CLASSES = "mock_FirstClass = mocker.MagicMock(name='FirstClass', " \
"spec=tests.sample.code.tested_module.FirstClass)\n" \
"mocker.patch('tests.sample.code.tested_module.FirstClass'," \
" new=mock_FirstClass)\n" \
"mock_SecondClass = mocker.MagicMock(name='SecondClass', " \
"spec=tests.sample.code.tested_module.SecondClass)\n" \
"mocker.patch('tests.sample.code.tested_module.SecondClass'" \
", new=mock_SecondClass)\n"
MOCKED_REFERENCED_CLASSES = "mock_dt = mocker.MagicMock(name='dt', " \
"spec=tests.sample.code.tested_module.dt)\n" \
"mocker.patch('tests.sample.code.tested_module." \
"dt', new=mock_dt)\n"
MOCKED_CLASSES_STATIC = """
class MockedFirstClassMeta(type):
static_instance = mocker.MagicMock(spec=tests.sample.code.tested_module.FirstClass)
def __getattr__(cls, key):
return MockedFirstClassMeta.static_instance.__getattr__(key)
class MockedFirstClass(metaclass=MockedFirstClassMeta):
original_cls = tests.sample.code.tested_module.FirstClass
instances = []
def __new__(cls, *args, **kwargs):
MockedFirstClass.instances.append(mocker.MagicMock(spec=MockedFirstClass.original_cls))
MockedFirstClass.instances[-1].__class__ = MockedFirstClass
return MockedFirstClass.instances[-1]
mocker.patch('tests.sample.code.tested_module.FirstClass', new=MockedFirstClass)
class MockedSecondClassMeta(type):
static_instance = mocker.MagicMock(spec=tests.sample.code.tested_module.SecondClass)
def __getattr__(cls, key):
return MockedSecondClassMeta.static_instance.__getattr__(key)
class MockedSecondClass(metaclass=MockedSecondClassMeta):
original_cls = tests.sample.code.tested_module.SecondClass
instances = []
def __new__(cls, *args, **kwargs):
MockedSecondClass.instances.append(mocker.MagicMock(spec=MockedSecondClass.original_cls))
MockedSecondClass.instances[-1].__class__ = MockedSecondClass
return MockedSecondClass.instances[-1]
mocker.patch('tests.sample.code.tested_module.SecondClass', new=MockedSecondClass)
"""
MOCKED_REFERENCED_CLASSES_STATIC = """
class MockeddtMeta(type):
static_instance = mocker.MagicMock(spec=tests.sample.code.tested_module.dt)
def __getattr__(cls, key):
return MockeddtMeta.static_instance.__getattr__(key)
class Mockeddt(metaclass=MockeddtMeta):
original_cls = tests.sample.code.tested_module.dt
instances = []
def __new__(cls, *args, **kwargs):
Mockeddt.instances.append(mocker.MagicMock(spec=Mockeddt.original_cls))
Mockeddt.instances[-1].__class__ = Mockeddt
return Mockeddt.instances[-1]
mocker.patch('tests.sample.code.tested_module.dt', new=Mockeddt)
"""
PREPARE_ASSERTS_CALLS_HEADER = "# calls to generate_asserts, put this after the 'act'\nimport mock_autogen\n"
PREPARE_ASSERTS_CALLS_DEFAULT = """mock_autogen.generate_asserts(mock_os, name='mock_os')
mock_autogen.generate_asserts(mock_random, name='mock_random')
mock_autogen.generate_asserts(mock_second_module, name='mock_second_module')
mock_autogen.generate_asserts(mock_zipfile, name='mock_zipfile')
mock_autogen.generate_asserts(mock_os_remove, name='mock_os_remove')
mock_autogen.generate_asserts(mock_dt, name='mock_dt')
"""
PREPARE_ASSERTS_CALLS_ALL = """mock_autogen.generate_asserts(mock_os, name='mock_os')
mock_autogen.generate_asserts(mock_random, name='mock_random')
mock_autogen.generate_asserts(mock_second_module, name='mock_second_module')
mock_autogen.generate_asserts(mock_zipfile, name='mock_zipfile')
mock_autogen.generate_asserts(mock_add, name='mock_add')
mock_autogen.generate_asserts(mock_append_to_cwd, name='mock_append_to_cwd')
mock_autogen.generate_asserts(mock_are_in_same_folder, name='mock_are_in_same_folder')
mock_autogen.generate_asserts(mock_base_64_partial_functions, name='mock_base_64_partial_functions')
mock_autogen.generate_asserts(mock_base_64_whole_modules, name='mock_base_64_whole_modules')
mock_autogen.generate_asserts(mock_get_current_time, name='mock_get_current_time')
mock_autogen.generate_asserts(mock_get_random_number, name='mock_get_random_number')
mock_autogen.generate_asserts(mock_os_remove_wrap, name='mock_os_remove_wrap')
mock_autogen.generate_asserts(mock_other_dir, name='mock_other_dir')
mock_autogen.generate_asserts(mock_process_and_zip, name='mock_process_and_zip')
mock_autogen.generate_asserts(mock_rm_alias, name='mock_rm_alias')
mock_autogen.generate_asserts(mock_second_dir, name='mock_second_dir')
mock_autogen.generate_asserts(mock_use_first_class, name='mock_use_first_class')
mock_autogen.generate_asserts(mock_use_second_class_static, name='mock_use_second_class_static')
mock_autogen.generate_asserts(mock_os_remove, name='mock_os_remove')
mock_autogen.generate_asserts(mock_FirstClass, name='mock_FirstClass')
mock_autogen.generate_asserts(mock_SecondClass, name='mock_SecondClass')
mock_autogen.generate_asserts(mock_dt, name='mock_dt')
"""
MOCKED_WARNINGS_HEADER = "# warnings\n"
MocksAllCollection = namedtuple(
'MocksAllCollection', 'os, second_module, add, append_to_cwd, '
'are_in_same_folder, other_dir, '
'rm_alias, os_remove_wrap, second_dir, os_remove')
MocksModulesOnlyCollection = namedtuple('MocksModulesOnlyCollection',
'os, second_module, zipfile')
MocksReferencedClassesOnlyCollection = namedtuple('MocksClassesOnlyCollection',
'datetime')
MocksFunctionsOnlyCollection = namedtuple(
'MocksFunctionsOnlyCollection', 'add, append_to_cwd, '
'are_in_same_folder, '
'other_dir, rm_alias, '
'os_remove_wrap, second_dir')
MocksBuiltinOnlyCollection = namedtuple('MocksAllCollection', 'os_remove')
@pytest.fixture
def mock_referenced_classes_only_collection(mocker):
"""
The mocks are taken from `test_generate_mocks_referenced_classes_only` :)
Args:
mocker (pytest.fixture): the mocker fixture
Yields:
MocksReferencedClassesOnlyCollection: The generated mocks.
"""
# mocked classes
mock_dt = mocker.MagicMock(name='dt',
spec=tests.sample.code.tested_module.dt)
mocker.patch('tests.sample.code.tested_module.dt', new=mock_dt)
yield MocksReferencedClassesOnlyCollection(mock_dt)
@pytest.fixture
def mock_modules_only_collection(mocker):
"""
The mocks are taken from `test_generate_mocks_modules_only` :)
Args:
mocker (pytest.fixture): the mocker fixture
Yields:
MocksModulesOnlyCollection: The generated mocks.
"""
# mocked modules
mock_os = mocker.MagicMock(name='os')
mocker.patch('tests.sample.code.tested_module.os', new=mock_os)
mock_second_module = mocker.MagicMock(name='second_module')
mocker.patch('tests.sample.code.tested_module.second_module',
new=mock_second_module)
mock_zipfile = mocker.MagicMock(name='zipfile')
mocker.patch('tests.sample.code.tested_module.zipfile', new=mock_zipfile)
yield MocksModulesOnlyCollection(mock_os, mock_second_module, mock_zipfile)
@pytest.fixture
def mock_functions_only_collection(mocker):
"""
The mocks are taken from `test_generate_mocks_functions_only` :)
Args:
mocker (pytest.fixture): the mocker fixture
Yields:
MocksFunctionsOnlyCollection: The generated mocks.
"""
# mocked functions
mock_add = mocker.MagicMock(name='add')
mocker.patch('tests.sample.code.tested_module.add', new=mock_add)
mock_append_to_cwd = mocker.MagicMock(name='append_to_cwd')
mocker.patch('tests.sample.code.tested_module.append_to_cwd',
new=mock_append_to_cwd)
mock_are_in_same_folder = mocker.MagicMock(name='are_in_same_folder')
mocker.patch('tests.sample.code.tested_module.are_in_same_folder',
new=mock_are_in_same_folder)
mock_other_dir = mocker.MagicMock(name='other_dir')
mocker.patch('tests.sample.code.tested_module.other_dir',
new=mock_other_dir)
mock_process_and_zip = mocker.MagicMock(name='process_and_zip')
mocker.patch('tests.sample.code.tested_module.process_and_zip',
new=mock_process_and_zip)
mock_rm_alias = mocker.MagicMock(name='rm_alias')
mocker.patch('tests.sample.code.tested_module.rm_alias', new=mock_rm_alias)
mock_os_remove_wrap = mocker.MagicMock(name='os_remove_wrap')
mocker.patch('tests.sample.code.tested_module.os_remove_wrap',
new=mock_os_remove_wrap)
mock_second_dir = mocker.MagicMock(name='second_dir')
mocker.patch('tests.sample.code.tested_module.second_dir',
new=mock_second_dir)
yield MocksFunctionsOnlyCollection(mock_add, mock_append_to_cwd,
mock_are_in_same_folder, mock_other_dir,
mock_rm_alias, mock_os_remove_wrap,
mock_second_dir)
@pytest.fixture
def mock_builtin_only_collection(mocker):
"""
The mocks are taken from `test_generate_mocks_builtin_only` :)
Args:
mocker (pytest.fixture): the mocker fixture
Yields:
MocksBuiltinOnlyCollection: The generated mocks.
"""
# mocked functions
mock_os_remove = mocker.MagicMock(name='os_remove')
mocker.patch('tests.sample.code.tested_module.os_remove',
new=mock_os_remove)
yield MocksBuiltinOnlyCollection(mock_os_remove)
@pytest.fixture
def mock_everything_collection(mocker):
"""
The mocks are taken from `test_generate_mocks_all` :)
Args:
mocker (pytest.fixture): the mocker fixture
Yields:
MocksAllCollection: The generated mocks.
"""
# mocked modules
mock_os = mocker.MagicMock(name='os')
mocker.patch('tests.sample.code.tested_module.os', new=mock_os)
mock_second_module = mocker.MagicMock(name='second_module')
mocker.patch('tests.sample.code.tested_module.second_module',
new=mock_second_module)
# mocked functions
mock_add = mocker.MagicMock(name='add')
mocker.patch('tests.sample.code.tested_module.add', new=mock_add)
mock_append_to_cwd = mocker.MagicMock(name='append_to_cwd')
mocker.patch('tests.sample.code.tested_module.append_to_cwd',
new=mock_append_to_cwd)
mock_are_in_same_folder = mocker.MagicMock(name='are_in_same_folder')
mocker.patch('tests.sample.code.tested_module.are_in_same_folder',
new=mock_are_in_same_folder)
mock_other_dir = mocker.MagicMock(name='other_dir')
mocker.patch('tests.sample.code.tested_module.other_dir',
new=mock_other_dir)
mock_rm_alias = mocker.MagicMock(name='rm_alias')
mocker.patch('tests.sample.code.tested_module.rm_alias', new=mock_rm_alias)
mock_os_remove_wrap = mocker.MagicMock(name='os_remove_wrap')
mocker.patch('tests.sample.code.tested_module.os_remove_wrap',
new=mock_os_remove_wrap)
mock_second_dir = mocker.MagicMock(name='second_dir')
mocker.patch('tests.sample.code.tested_module.second_dir',
new=mock_second_dir)
mock_os_remove = mocker.MagicMock(name='os_remove')
mocker.patch('tests.sample.code.tested_module.os_remove',
new=mock_os_remove)
yield MocksAllCollection(mock_os, mock_second_module, mock_add,
mock_append_to_cwd, mock_are_in_same_folder,
mock_other_dir, mock_rm_alias,
mock_os_remove_wrap, mock_second_dir,
mock_os_remove)
def test_generate_mocks_modules_only():
generated_mocks = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
tests.sample.code.tested_module,
mock_modules=True,
mock_functions=False,
mock_builtin=False,
mock_classes=False,
mock_referenced_classes=False,
mock_classes_static=False,
prepare_asserts_calls=False)
assert MOCKED_MODULES_HEADER + MOCKED_MODULES == generated_mocks
def test_generate_mocks_functions_only():
generated_mocks = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
tests.sample.code.tested_module,
mock_modules=False,
mock_functions=True,
mock_builtin=False,
mock_classes=False,
mock_referenced_classes=False,
mock_classes_static=False,
prepare_asserts_calls=False)
assert MOCKED_FUNCTIONS_HEADER + MOCKED_FUNCTIONS == generated_mocks
def test_generate_mocks_object_methods_only():
first = tests.sample.code.tested_module.FirstClass('20')
generated_mocks_instance = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
first,
name='first',
mock_modules=False,
mock_functions=True,
mock_builtin=False,
mock_classes=False,
mock_referenced_classes=False,
mock_classes_static=False,
prepare_asserts_calls=False)
assert MOCKED_METHODS_HEADER + MOCKED_METHODS == generated_mocks_instance
def test_generate_mocks_builtin_only():
generated_mocks = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
tests.sample.code.tested_module,
mock_modules=False,
mock_functions=False,
mock_builtin=True,
mock_classes=False,
mock_referenced_classes=False,
mock_classes_static=False,
prepare_asserts_calls=False)
assert MOCKED_FUNCTIONS_HEADER + MOCKED_BUILTIN == generated_mocks
def test_generate_mocks_classes_only():
generated_mocks = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
tests.sample.code.tested_module,
mock_modules=False,
mock_functions=False,
mock_builtin=False,
mock_classes=True,
mock_referenced_classes=False,
mock_classes_static=False,
prepare_asserts_calls=False)
assert MOCKED_CLASSES_HEADER + MOCKED_CLASSES == generated_mocks
def test_generate_mocks_referenced_classes_only():
generated_mocks = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
tests.sample.code.tested_module,
mock_modules=False,
mock_functions=False,
mock_builtin=False,
mock_classes=False,
mock_referenced_classes=True,
mock_classes_static=False,
prepare_asserts_calls=False)
assert MOCKED_CLASSES_HEADER + MOCKED_REFERENCED_CLASSES == generated_mocks
def test_generate_mocks_classes_static_only():
generated_mocks = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
tests.sample.code.tested_module,
mock_modules=False,
mock_functions=False,
mock_builtin=False,
mock_classes=True,
mock_referenced_classes=False,
mock_classes_static=True,
prepare_asserts_calls=False)
assert MOCKED_CLASSES_HEADER + MOCKED_CLASSES_STATIC == generated_mocks
def test_generate_mocks_referenced_classes_static_only():
generated_mocks = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
tests.sample.code.tested_module,
mock_modules=False,
mock_functions=False,
mock_builtin=False,
mock_classes=False,
mock_referenced_classes=True,
mock_classes_static=True,
prepare_asserts_calls=False)
assert MOCKED_CLASSES_HEADER + MOCKED_REFERENCED_CLASSES_STATIC \
== generated_mocks
def test_generate_mocks_prepare_asserts_calls_only():
generated_mocks = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
tests.sample.code.tested_module,
mock_modules=False,
mock_functions=False,
mock_builtin=False,
mock_classes=False,
mock_referenced_classes=False,
mock_classes_static=False,
prepare_asserts_calls=True)
assert not generated_mocks
def test_generate_mocks_all():
generated_mocks = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
tests.sample.code.tested_module,
mock_modules=True,
mock_functions=True,
mock_builtin=True,
mock_classes=True,
mock_referenced_classes=True,
mock_classes_static=False,
prepare_asserts_calls=True)
assert MOCKED_MODULES_HEADER + MOCKED_MODULES + \
MOCKED_FUNCTIONS_HEADER + MOCKED_FUNCTIONS + MOCKED_BUILTIN + \
MOCKED_CLASSES_HEADER + MOCKED_CLASSES + MOCKED_REFERENCED_CLASSES + \
PREPARE_ASSERTS_CALLS_HEADER + PREPARE_ASSERTS_CALLS_ALL \
== generated_mocks
def test_generate_mocks_default():
generated_mocks = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
tests.sample.code.tested_module)
assert MOCKED_MODULES_HEADER + MOCKED_MODULES + \
MOCKED_FUNCTIONS_HEADER + MOCKED_BUILTIN + \
MOCKED_CLASSES_HEADER + MOCKED_REFERENCED_CLASSES + \
PREPARE_ASSERTS_CALLS_HEADER + PREPARE_ASSERTS_CALLS_DEFAULT == generated_mocks
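# Helper used by the tests below: splits a generated-mocks string into its
# warnings, mocked-dependencies and generate_asserts sections, keyed off the
# section header comments.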
def _extract_warnings_generated_mocks_and_generated_asserts(expected):
warnings = []
generated_mocks = []
generated_asserts = []
inside_warnings = False
inside_mocks = False
inside_asserts = False
for line in expected.splitlines():
if line == MOCKED_WARNINGS_HEADER.rstrip():
inside_warnings = True
if line == MOCKED_FUNCTIONS_HEADER.rstrip(
) or line == MOCKED_DEPENDENCIES_HEADER.rstrip():
inside_warnings = False
inside_mocks = True
if line == PREPARE_ASSERTS_CALLS_HEADER.splitlines()[0]:
inside_mocks = False
inside_asserts = True
if inside_warnings:
warnings.append(line)
if inside_mocks:
generated_mocks.append(line)
if inside_asserts:
generated_asserts.append(line)
return warnings, generated_mocks, generated_asserts
def test_generate_mocks_function_inner_imports(mocker):
wo_mock = tests.sample.code.tested_module.base_64_whole_modules("my msg1")
assert re.match(r"^MY MSG1.*False$", wo_mock) # without mocks
expected = """# warnings
# could not convert a function call into a mock on node:
# (message.upper() + suffix). \
# encode('ascii')
# mocked dependencies
mock_randint = mocker.MagicMock(name='randint')
mocker.patch('tests.sample.code.tested_module.random.randint', new=mock_randint)
mock_get_random_number = mocker.MagicMock(name='get_random_number')
mocker.patch('tests.sample.code.tested_module.get_random_number', new=mock_get_random_number)
mock_str = mocker.MagicMock(name='str')
mocker.patch('tests.sample.code.tested_module.str', new=mock_str)
mock_isfile = mocker.MagicMock(name='isfile')
mocker.patch('tests.sample.code.tested_module.os.path.isfile', new=mock_isfile)
mock_b64encode = mocker.MagicMock(name='b64encode')
mocker.patch('base64.b64encode', new=mock_b64encode)
mock_b64decode = mocker.MagicMock(name='b64decode')
mocker.patch('base64.b64decode', new=mock_b64decode)
# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_randint, name='mock_randint')
mock_autogen.generate_asserts(mock_get_random_number, name='mock_get_random_number')
mock_autogen.generate_asserts(mock_str, name='mock_str')
mock_autogen.generate_asserts(mock_isfile, name='mock_isfile')
mock_autogen.generate_asserts(mock_b64encode, name='mock_b64encode')
mock_autogen.generate_asserts(mock_b64decode, name='mock_b64decode')
"""
expected_warnings, expected_mocks, expected_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(expected)
generated = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
tests.sample.code.tested_module.base_64_whole_modules)
generated_warnings, generated_mocks, generated_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(generated)
# don't compare warning code since python version might be less than 3.8
assert expected_warnings[0:2] == generated_warnings[0:2]
assert expected_mocks == generated_mocks
assert expected_asserts == generated_asserts
# verify the validity of generated mocks code
exec(generated +
"\nmock_b64decode.return_value.decode.return_value = '20'")
w_mock = tests.sample.code.tested_module.base_64_whole_modules("my msg2")
assert "20" == w_mock
def test_generate_mocks_function_inner_imports_partial_functions(mocker):
wo_mock = tests.sample.code.tested_module.base_64_partial_functions(
"my msg1")
assert re.match(r"^MY MSG1.*False$", wo_mock) # without mocks
expected = """# warnings
# could not convert a function call into a mock on node:
# (message.upper() + suffix). \
# encode('ascii')
# mocked dependencies
mock_randint = mocker.MagicMock(name='randint')
mocker.patch('tests.sample.code.tested_module.random.randint', new=mock_randint)
mock_get_random_number = mocker.MagicMock(name='get_random_number')
mocker.patch('tests.sample.code.tested_module.get_random_number', new=mock_get_random_number)
mock_str = mocker.MagicMock(name='str')
mocker.patch('tests.sample.code.tested_module.str', new=mock_str)
mock_isfile = mocker.MagicMock(name='isfile')
mocker.patch('tests.sample.code.tested_module.os.path.isfile', new=mock_isfile)
mock_b64encode = mocker.MagicMock(name='b64encode')
mocker.patch('base64.b64encode', new=mock_b64encode)
mock_b64decode = mocker.MagicMock(name='b64decode')
mocker.patch('base64.b64decode', new=mock_b64decode)
# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_randint, name='mock_randint')
mock_autogen.generate_asserts(mock_get_random_number, name='mock_get_random_number')
mock_autogen.generate_asserts(mock_str, name='mock_str')
mock_autogen.generate_asserts(mock_isfile, name='mock_isfile')
mock_autogen.generate_asserts(mock_b64encode, name='mock_b64encode')
mock_autogen.generate_asserts(mock_b64decode, name='mock_b64decode')
"""
expected_warnings, expected_mocks, expected_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(expected)
generated = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
tests.sample.code.tested_module.base_64_partial_functions)
generated_warnings, generated_mocks, generated_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(generated)
# don't compare warning code since python version might be less than 3.8
assert expected_warnings[0:2] == generated_warnings[0:2]
assert expected_mocks == generated_mocks
assert expected_asserts == generated_asserts
# verify the validity of generated mocks code
exec(generated +
"\nmock_b64decode.return_value.decode.return_value = '20'")
w_mock = tests.sample.code.tested_module.base_64_partial_functions(
"my msg2")
assert "20" == w_mock
def test_generate_mocks_function_list_comprehension(mocker):
wo_mock = get_square_root([1, 4, 9])
assert [1, 2, 3] == wo_mock # without mocks
expected = """# mocked dependencies
mock_sqrt = mocker.MagicMock(name='sqrt')
mocker.patch('tests.sample.code.comprehensions_and_loops.math.sqrt', new=mock_sqrt)
# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_sqrt, name='mock_sqrt')
"""
expected_warnings, expected_mocks, expected_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(expected)
generated = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK, get_square_root)
generated_warnings, generated_mocks, generated_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(generated)
assert expected_warnings == generated_warnings
assert expected_mocks == generated_mocks
assert expected_asserts == generated_asserts
# verify the validity of generated mocks code
exec(generated +
"\nmock_sqrt.side_effect = [-1]*len('not a list of numbers')")
w_mock = get_square_root('not a list of numbers')
assert [-1] * len('not a list of numbers') == w_mock
def test_generate_mocks_function_list_comprehension_external_variable(mocker):
wo_mock = get_square_root_external_variable()
assert [1, 2, 3] == wo_mock # without mocks
expected = """# mocked dependencies
mock_sqrt = mocker.MagicMock(name='sqrt')
mocker.patch('tests.sample.code.comprehensions_and_loops.math.sqrt', new=mock_sqrt)
mock_external_items = mocker.MagicMock(name='external_items')
mocker.patch('tests.sample.code.comprehensions_and_loops.external_items', new=mock_external_items)
# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_sqrt, name='mock_sqrt')
mock_autogen.generate_asserts(mock_external_items, name='mock_external_items')
"""
expected_warnings, expected_mocks, expected_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(expected)
generated = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
get_square_root_external_variable)
generated_warnings, generated_mocks, generated_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(generated)
assert expected_warnings == generated_warnings
assert expected_mocks == generated_mocks
assert expected_asserts == generated_asserts
# verify the validity of generated mocks code
exec(generated +
"\nmock_sqrt.side_effect = [-1]*len('not a list of numbers')"
"\nmock_external_items.__iter__.return_value = [9, 16, 25, 36]")
w_mock = get_square_root_external_variable()
    assert [-1] * 4 == w_mock  # we changed the number of items in the external variable
def test_generate_mocks_lock_external_variable(mocker, capsys):
with_statements.single_thread_dict = {}
wo_mock = with_statements.outside_lock_context("some", "value")
assert "value" == wo_mock # without mocks
wo_mock = with_statements.outside_lock_context("some", "other value")
assert "value" == wo_mock # without mocks
expected = """# mocked dependencies
mock_lock = mocker.MagicMock(name='lock')
mocker.patch('tests.sample.code.with_statements.lock', new=mock_lock)
mock_single_thread_dict = mocker.MagicMock(name='single_thread_dict')
mocker.patch('tests.sample.code.with_statements.single_thread_dict', new=mock_single_thread_dict)
# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_lock, name='mock_lock')
mock_autogen.generate_asserts(mock_single_thread_dict, name='mock_single_thread_dict')
"""
expected_warnings, expected_mocks, expected_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(expected)
generated = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
with_statements.outside_lock_context)
generated_warnings, generated_mocks, generated_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(generated)
assert expected_warnings == generated_warnings
assert expected_mocks == generated_mocks
assert expected_asserts == generated_asserts
# verify the validity of generated mocks code
exec("\n".join(generated_mocks) +
"\nmock_single_thread_dict.__contains__.return_value = False"
"\nmock_single_thread_dict.__getitem__.return_value = 'strange'")
w_mock = with_statements.outside_lock_context("some", "third value")
assert 'strange' == w_mock
capsys.readouterr().out # this clears the existing output
exec("\n".join(generated_asserts))
expected_mock_results = """mock_lock.__enter__.assert_called_once_with()
mock_lock.__exit__.assert_called_once_with(None, None, None)
mock_single_thread_dict.__contains__.assert_called_once_with('some')
mock_single_thread_dict.__setitem__.assert_called_once_with('some', 'third value')
mock_single_thread_dict.__getitem__.assert_called_once_with('some')
"""
assert expected_mock_results == capsys.readouterr().out
def test_generate_mocks_function_dict_comprehension(mocker):
expected = """# mocked dependencies
mock_len = mocker.MagicMock(name='len')
mocker.patch('tests.sample.code.comprehensions_and_loops.len', new=mock_len)
mock_items = mocker.MagicMock(name='items')
mocker.patch('tests.sample.code.comprehensions_and_loops.os.environ.items', new=mock_items)
# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_len, name='mock_len')
mock_autogen.generate_asserts(mock_items, name='mock_items')
"""
expected_warnings, expected_mocks, expected_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(expected)
generated = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
summarize_environ_values)
generated_warnings, generated_mocks, generated_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(generated)
assert expected_warnings == generated_warnings
assert expected_mocks == generated_mocks
assert expected_asserts == generated_asserts
# verify the validity of generated mocks code
exec(generated + "\nmock_len.side_effect = range(3)" +
"\nmock_items.return_value = (('a','b'), ('c','d'), ('e','f'),)")
w_mock = summarize_environ_values()
assert {'a': 0, 'c': 1, 'e': 2} == w_mock
def test_generate_mocks_function_dict_comprehension_ignore_variables(mocker):
expected = """# mocked dependencies
mock_len = mocker.MagicMock(name='len')
mocker.patch('tests.sample.code.comprehensions_and_loops.len', new=mock_len)
# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_len, name='mock_len')
"""
expected_warnings, expected_mocks, expected_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(expected)
generated = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK, trimmed_strings)
generated_warnings, generated_mocks, generated_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(generated)
assert expected_warnings == generated_warnings
assert expected_mocks == generated_mocks
assert expected_asserts == generated_asserts
# verify the validity of generated mocks code
exec(generated + "\nmock_len.return_value = 20")
w_mock = trimmed_strings(["a", "bb", "cc "])
assert {'a': 20, 'cc': 20, 'bb': 20} == w_mock
def test_generate_mocks_function_subscript(mocker):
expected = """# mocked dependencies
mock_sqrt = mocker.MagicMock(name='sqrt')
mocker.patch('tests.sample.code.subscripts.math.sqrt', new=mock_sqrt)
mock_randint = mocker.MagicMock(name='randint')
mocker.patch('tests.sample.code.subscripts.random.randint', new=mock_randint)
mock_str = mocker.MagicMock(name='str')
mocker.patch('tests.sample.code.subscripts.str', new=mock_str)
# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_sqrt, name='mock_sqrt')
mock_autogen.generate_asserts(mock_randint, name='mock_randint')
mock_autogen.generate_asserts(mock_str, name='mock_str')
"""
expected_warnings, expected_mocks, expected_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(expected)
generated = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
list_subscript_games)
generated_warnings, generated_mocks, generated_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(generated)
assert expected_warnings == generated_warnings
assert expected_mocks == generated_mocks
assert expected_asserts == generated_asserts
# verify the validity of generated mocks code
exec(generated + "\nmock_sqrt.return_value = 0" +
"\nmock_randint.return_value = 0" + "\nmock_str.return_value = '7'")
my_list = [1, 2, 3, 4, 5]
list_subscript_games(my_list)
assert [-1, '7', 5] == my_list
def test_generate_mocks_function_same_function_name_different_objects(mocker):
wo_mock = get_username_and_password()
assert "some_username,some_password" == wo_mock # without mocks
expected = """# mocked dependencies
mock_get = mocker.MagicMock(name='get')
mocker.patch('tests.sample.code.same_method_name.get', new=mock_get)
mock_get_2 = mocker.MagicMock(name='get_2')
mocker.patch('tests.sample.code.same_method_name.os.environ.get', new=mock_get_2)
# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_get, name='mock_get')
mock_autogen.generate_asserts(mock_get_2, name='mock_get_2')
"""
expected_warnings, expected_mocks, expected_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(expected)
generated = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
get_username_and_password)
generated_warnings, generated_mocks, generated_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(generated)
assert expected_warnings == generated_warnings
assert expected_mocks == generated_mocks
assert expected_asserts == generated_asserts
# verify the validity of generated mocks code
exec(generated + "\nmock_get.return_value = 'made_up_username'"
"\nmock_get_2.return_value = 'made_up_password'")
w_mock = get_username_and_password()
assert 'made_up_username,made_up_password' == w_mock
def test_generate_mocks_method_inner_calls(mocker):
bin_op_class_name = 'ast.BinOp' if sys.version_info >= (
3, 9) else '_ast.BinOp'
global_before = tests.sample.code.tested_module.global_counter
prop_before = tests.sample.code.tested_module.FirstClass.prop
first = tests.sample.code.tested_module.FirstClass('20')
expected = f"""# warnings
# could not convert a function call into a mock on node:
# (suffix.upper() + suffix).encode('ascii')
# Can't stringify node of type <class '{bin_op_class_name}'>
# mocked dependencies
mock_randint = mocker.MagicMock(name='randint')
mocker.patch('tests.sample.code.tested_module.random.randint', new=mock_randint)
mock_get_random_number = mocker.MagicMock(name='get_random_number')
mocker.patch('tests.sample.code.tested_module.get_random_number', new=mock_get_random_number)
mock_str = mocker.MagicMock(name='str')
mocker.patch('tests.sample.code.tested_module.str', new=mock_str)
mock_isfile = mocker.MagicMock(name='isfile')
mocker.patch('tests.sample.code.tested_module.os.path.isfile', new=mock_isfile)
mock_b64encode = mocker.MagicMock(name='b64encode')
mocker.patch('base64.b64encode', new=mock_b64encode)
mock_b64decode = mocker.MagicMock(name='b64decode')
mocker.patch('base64.b64decode', new=mock_b64decode)
mock_increase_global_counter = mocker.MagicMock(name='increase_global_counter')
mocker.patch('tests.sample.code.tested_module.FirstClass.increase_global_counter', new=mock_increase_global_counter)
mock_increase_class_counter = mocker.MagicMock(name='increase_class_counter')
mocker.patch('tests.sample.code.tested_module.FirstClass.increase_class_counter', new=mock_increase_class_counter)
mock_not_implemented = mocker.MagicMock(name='not_implemented')
mocker.patch('tests.sample.code.tested_module.FirstClass.not_implemented', new=mock_not_implemented)
# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_randint, name='mock_randint')
mock_autogen.generate_asserts(mock_get_random_number, name='mock_get_random_number')
mock_autogen.generate_asserts(mock_str, name='mock_str')
mock_autogen.generate_asserts(mock_isfile, name='mock_isfile')
mock_autogen.generate_asserts(mock_b64encode, name='mock_b64encode')
mock_autogen.generate_asserts(mock_b64decode, name='mock_b64decode')
mock_autogen.generate_asserts(mock_increase_global_counter, name='mock_increase_global_counter')
mock_autogen.generate_asserts(mock_increase_class_counter, name='mock_increase_class_counter')
mock_autogen.generate_asserts(mock_not_implemented, name='mock_not_implemented')
"""
expected_warnings, expected_mocks, expected_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(expected)
generated = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
first.using_not_implemented)
generated_warnings, generated_mocks, generated_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(generated)
# don't compare warning code since python version might be less than 3.8
assert expected_warnings[0:2] == generated_warnings[0:2]
if sys.version_info >= (3, 8):
assert expected_warnings == generated_warnings
assert expected_mocks == generated_mocks
assert expected_asserts == generated_asserts
exec(generated) # verify the validity of generated mocks code
first.using_not_implemented()
assert global_before == tests.sample.code.tested_module.global_counter
assert prop_before == tests.sample.code.tested_module.FirstClass.prop
exec("mock_not_implemented.assert_called_once()")
def test_generate_mocks_static_method_inner_calls(mocker):
global_before = tests.sample.code.tested_module.global_counter
prop_before = tests.sample.code.tested_module.FirstClass.prop
first = tests.sample.code.tested_module.FirstClass('20')
expected = """# mocked dependencies
mock_get_random_number = mocker.MagicMock(name='get_random_number')
mocker.patch('tests.sample.code.tested_module.get_random_number', new=mock_get_random_number)
mock_staticmethod = mocker.MagicMock(name='staticmethod')
mocker.patch('tests.sample.code.tested_module.staticmethod', new=mock_staticmethod)
# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_get_random_number, name='mock_get_random_number')
mock_autogen.generate_asserts(mock_staticmethod, name='mock_staticmethod')
"""
expected_warnings, expected_mocks, expected_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(expected)
generated_mocks_function = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
first.increase_global_counter)
generated_mocks_function_from_class = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
tests.sample.code.tested_module.FirstClass.increase_global_counter)
assert generated_mocks_function == generated_mocks_function_from_class
generated_warnings, generated_mocks, generated_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(
generated_mocks_function)
assert expected_warnings == generated_warnings
assert expected_mocks == generated_mocks
assert expected_asserts == generated_asserts
# verify the validity of generated mocks code
exec(generated_mocks_function +
f"\nmock_get_random_number.return_value = {global_before}")
first.increase_global_counter()
assert global_before == tests.sample.code.tested_module.global_counter
assert prop_before == tests.sample.code.tested_module.FirstClass.prop
exec("mock_get_random_number.assert_called_once()")
def test_generate_mocks_class_method_inner_calls(mocker):
global_before = tests.sample.code.tested_module.global_counter
prop_before = tests.sample.code.tested_module.FirstClass.prop
first = tests.sample.code.tested_module.FirstClass('20')
expected = """# mocked dependencies
mock_get_random_number = mocker.MagicMock(name='get_random_number')
mocker.patch('tests.sample.code.tested_module.get_random_number', new=mock_get_random_number)
mock_increase_global_counter = mocker.MagicMock(name='increase_global_counter')
mocker.patch('tests.sample.code.tested_module.FirstClass.increase_global_counter', new=mock_increase_global_counter)
mock_classmethod = mocker.MagicMock(name='classmethod')
mocker.patch('tests.sample.code.tested_module.classmethod', new=mock_classmethod)
# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_get_random_number, name='mock_get_random_number')
mock_autogen.generate_asserts(mock_increase_global_counter, name='mock_increase_global_counter')
mock_autogen.generate_asserts(mock_classmethod, name='mock_classmethod')
"""
expected_warnings, expected_mocks, expected_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(expected)
generated_mocks_function = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
first.increase_class_counter)
generated_mocks_function_from_class = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
tests.sample.code.tested_module.FirstClass.increase_class_counter)
assert generated_mocks_function == generated_mocks_function_from_class
generated_warnings, generated_mocks, generated_asserts = \
_extract_warnings_generated_mocks_and_generated_asserts(
generated_mocks_function)
assert expected_warnings == generated_warnings
assert expected_mocks == generated_mocks
assert expected_asserts == generated_asserts
# verify the validity of generated mocks code
exec(generated_mocks_function +
f"\nmock_get_random_number.return_value = {prop_before}")
first.increase_class_counter()
assert global_before == tests.sample.code.tested_module.global_counter
assert prop_before == tests.sample.code.tested_module.FirstClass.prop
exec("mock_get_random_number.assert_called_once()")
exec("mock_increase_global_counter.assert_called_once()")
def test_generate_asserts_are_in_same_folder_args(mock_everything_collection):
tests.sample.code.tested_module.are_in_same_folder('/some/path/file1.txt',
'/some/path/file2.txt')
mock_are_in_same_folder = mock_everything_collection.are_in_same_folder
generated = mock_autogen.generator.generate_asserts(
mock_are_in_same_folder)
assert 'assert 1 == mock_are_in_same_folder.call_count\n' \
"mock_are_in_same_folder.assert_called_once_with(" \
"'/some/path/file1.txt', '/some/path/file2.txt')\n" == generated
exec(generated) # verify the validity of assertions
def test_generate_asserts_rename_argument(mock_everything_collection):
tests.sample.code.tested_module.are_in_same_folder('/some/path/file1.txt',
'/some/path/file2.txt')
mock_are_in_same_folder = mock_everything_collection.are_in_same_folder
generated = mock_autogen.generator.generate_asserts(
mock_are_in_same_folder, name='my_mock')
assert 'assert 1 == my_mock.call_count\n' \
"my_mock.assert_called_once_with(" \
"'/some/path/file1.txt', '/some/path/file2.txt')\n" == generated
def test_generate_asserts_unable_to_find_argument(mock_everything_collection):
tests.sample.code.tested_module.are_in_same_folder('/some/path/file1.txt',
'/some/path/file2.txt')
generated = mock_autogen.generator.generate_asserts(
mock_everything_collection.are_in_same_folder)
assert 'assert 1 == arg.call_count\n' \
"arg.assert_called_once_with(" \
"'/some/path/file1.txt', '/some/path/file2.txt')\n" == generated
def test_generate_asserts_mocks_were_not_called(mock_everything_collection):
for mocked in mock_everything_collection:
generated = mock_autogen.generator.generate_asserts(mocked)
assert "mocked.assert_not_called()" == generated
exec(generated)
def test_generate_asserts_are_in_same_folder_kwargs(
mock_functions_only_collection):
tests.sample.code.tested_module.are_in_same_folder(
path1='/some/path/file1.txt', path2='/some/path/file2.txt')
mock_are_in_same_folder = mock_functions_only_collection.are_in_same_folder
generated = mock_autogen.generator.generate_asserts(
mock_are_in_same_folder)
assert "assert 1 == mock_are_in_same_folder.call_count\n" \
"mock_are_in_same_folder.assert_called_once_with(" \
"path1='/some/path/file1.txt', " \
"path2='/some/path/file2.txt')\n" == generated
exec(generated) # verify the validity of assertions
def test_generate_asserts_are_in_same_folder_mix_args_kwargs(
mock_everything_collection):
tests.sample.code.tested_module.are_in_same_folder(
'/some/path/file1.txt', path2='/some/path/file2.txt')
mock_are_in_same_folder = mock_everything_collection.are_in_same_folder
generated = mock_autogen.generator.generate_asserts(
mock_are_in_same_folder)
assert "assert 1 == mock_are_in_same_folder.call_count\n" \
"mock_are_in_same_folder.assert_called_once_with(" \
"'/some/path/file1.txt', " \
"path2='/some/path/file2.txt')\n" == generated
exec(generated) # verify the validity of assertions
def test_generate_asserts_rm_alias_builtin_only(mock_builtin_only_collection):
tests.sample.code.tested_module.rm_alias('/some/path/file1.txt')
mock_os_remove = mock_builtin_only_collection.os_remove
generated = mock_autogen.generator.generate_asserts(mock_os_remove)
assert "assert 1 == mock_os_remove.call_count\n" \
"mock_os_remove.assert_called_once_with('/some/path/file1.txt')\n" \
== generated
exec(generated) # verify the validity of assertions
def test_generate_asserts_append_to_cwd_builtin_only(
mock_modules_only_collection):
tests.sample.code.tested_module.append_to_cwd('/some/path/file1.txt')
mock_os = mock_modules_only_collection.os
generated = mock_autogen.generator.generate_asserts(mock_os)
assert re.match(
r"^mock_os.getcwd.assert_called_once_with\(\)\n"
r"mock_os.path.join.assert_called_once_with"
r"\(<MagicMock name='os.getcwd\(\)' id='\d+'>, "
r"'/some/path/file1.txt'\)\n$", generated)
# added ANY to match the mock parameter
from mock import ANY
mock_os.path.join.assert_called_once_with(ANY, '/some/path/file1.txt')
mock_os.getcwd.assert_called_once_with()
def test_generate_asserts_append_to_cwd_builtin_only_mocked_cwd(
mock_modules_only_collection):
mock_os = mock_modules_only_collection.os
    # added this so the assert can be effective.
# this is an example of the code the user has to add on top of the utility
mock_os.getcwd.return_value = '/some/pwd'
tests.sample.code.tested_module.append_to_cwd('/some/path/file1.txt')
generated = mock_autogen.generator.generate_asserts(mock_os)
assert "mock_os.getcwd.assert_called_once_with()\n" \
"mock_os.path.join.assert_called_once_with" \
"('/some/pwd', '/some/path/file1.txt')\n" == generated
exec(generated) # verify the validity of assertions
def test_generate_asserts_add_mix_types(mock_functions_only_collection):
tests.sample.code.tested_module.add('one', 2)
mock_add = mock_functions_only_collection.add
generated = mock_autogen.generator.generate_asserts(mock_add)
assert 'assert 1 == mock_add.call_count\n' \
"mock_add.assert_called_once_with(" \
"'one', 2)\n" == generated
exec(generated) # verify the validity of assertions
def test_generate_asserts_add_multiple_calls(mock_functions_only_collection):
tests.sample.code.tested_module.add(1, 2)
tests.sample.code.tested_module.add('one', 'two')
mock_add = mock_functions_only_collection.add
generated = mock_autogen.generator.generate_asserts(mock_add)
assert 'from mock import call\n\n' \
'assert 2 == mock_add.call_count\n' \
"mock_add.assert_has_calls(calls=[call(1, 2)," \
"call('one', 'two'),])\n" == generated
exec(generated) # verify the validity of assertions
def test_generate_asserts_context_manager(mock_modules_only_collection):
tests.sample.code.tested_module.process_and_zip('/path/to.zip',
'in_zip.txt', 'foo bar')
mock_zipfile = mock_modules_only_collection.zipfile
generated = mock_autogen.generator.generate_asserts(mock_zipfile)
assert "mock_zipfile.ZipFile.assert_called_once_with(" \
"'/path/to.zip', 'w')\n" \
"mock_zipfile.ZipFile.return_value.__enter__." \
"assert_called_once_with()\n" \
"mock_zipfile.ZipFile.return_value.__enter__." \
"return_value.writestr.assert_called_once_with(" \
"'in_zip.txt', 'processed foo bar')\n" \
"mock_zipfile.ZipFile.return_value.__exit__." \
"assert_called_once_with(None, None, None)\n" == generated
exec(generated) # verify the validity of assertions
def test_generate_asserts_class(mocker):
# mocked classes
mock_FirstClass = mocker.MagicMock(
name='FirstClass', spec=tests.sample.code.tested_module.FirstClass)
mocker.patch('tests.sample.code.tested_module.FirstClass',
new=mock_FirstClass)
tests.sample.code.tested_module.use_first_class('20')
generated = mock_autogen.generator.generate_asserts(mock_FirstClass)
assert "assert 1 == mock_FirstClass.call_count\n" \
"mock_FirstClass.assert_called_once_with('20')\n" \
"mock_FirstClass.return_value.not_implemented." \
"assert_called_once_with(None)\n" == generated
exec(generated) # verify the validity of assertions
def test_generate_asserts_non_overridden_repr(mocker):
# mocked classes
mock_FirstClass = mocker.MagicMock(
name='FirstClass', spec=tests.sample.code.tested_module.FirstClass)
mocker.patch('tests.sample.code.tested_module.FirstClass',
new=mock_FirstClass)
tests.sample.code.tested_module.use_first_class(
'20', tests.sample.code.tested_module.SecondClass(42))
generated = mock_autogen.generator.generate_asserts(mock_FirstClass)
assert re.match(
r"^assert 1 == mock_FirstClass.call_count\n"
r"mock_FirstClass.assert_called_once_with\('20'\)\n"
r"mock_FirstClass.return_value.not_implemented."
r"assert_called_once_with\(_tests.sample.code.tested_module."
r"SecondClass_object_at_0x[0-9A-Fa-f]+_\)\n$", generated)
def test_generate_asserts_class_static(mocker):
# mocked classes
class MockedSecondClassMeta(type):
static_instance = mocker.MagicMock(
spec=tests.sample.code.tested_module.SecondClass)
def __getattr__(cls, key):
return MockedSecondClassMeta.static_instance.__getattr__(key)
class MockedSecondClass(metaclass=MockedSecondClassMeta):
original_cls = tests.sample.code.tested_module.SecondClass
instances = []
def __new__(cls, *args, **kwargs):
MockedSecondClass.instances.append(
mocker.MagicMock(spec=MockedSecondClass.original_cls))
MockedSecondClass.instances[-1].__class__ = MockedSecondClass
return MockedSecondClass.instances[-1]
mocker.patch('tests.sample.code.tested_module.SecondClass',
new=MockedSecondClass)
tests.sample.code.tested_module.use_second_class_static('20')
assert 1 == len(MockedSecondClass.instances)
generated_static = mock_autogen.generator.generate_asserts(
MockedSecondClassMeta.static_instance,
name="MockedSecondClassMeta.static_instance")
assert re.match(
r"^MockedSecondClassMeta.static_instance.prop.__eq__."
r"assert_called_once_with\(<MagicMock "
r"name='mock.prop' id='\d+'>\)\n$", generated_static)
generated_instance = mock_autogen.generator.generate_asserts(
MockedSecondClass.instances[0], name="MockedSecondClass.instances[0]")
assert re.match(
r"^MockedSecondClass.instances\[0\].not_implemented."
r"assert_called_once_with\(\)\n"
r"MockedSecondClass.instances\[0\].prop.__eq__."
r"assert_called_once_with\("
r"<MagicMock name='mock.prop' id='\d+'>\)\n$", generated_instance)
def test_class_static_objects_behave_the_same(mocker):
# mocked classes
class MockedSecondClassMeta(type):
static_instance = mocker.MagicMock(
spec=tests.sample.code.tested_module.SecondClass)
def __getattr__(cls, key):
return MockedSecondClassMeta.static_instance.__getattr__(key)
class MockedSecondClass(metaclass=MockedSecondClassMeta):
original_cls = tests.sample.code.tested_module.SecondClass
instances = []
def __new__(cls, *args, **kwargs):
MockedSecondClass.instances.append(
mocker.MagicMock(spec=MockedSecondClass.original_cls))
MockedSecondClass.instances[-1].__class__ = MockedSecondClass
return MockedSecondClass.instances[-1]
mocker.patch('tests.sample.code.tested_module.SecondClass',
new=MockedSecondClass)
second = tests.sample.code.tested_module.SecondClass('20')
second.not_implemented()
with pytest.raises(AttributeError):
second.unknown_method()
assert isinstance(second, tests.sample.code.tested_module.SecondClass)
def test_referenced_class(mock_referenced_classes_only_collection):
mock_referenced_classes_only_collection.datetime.utcnow.return_value = 20
current_time = tests.sample.code.tested_module.get_current_time()
assert 20 == current_time
def test_mock_object_instance(mocker):
first = tests.sample.code.tested_module.FirstClass('20')
exec(MOCKED_METHODS) # mocks all the methods
first.not_implemented() # would have raised exception otherwise
first.not_implemented.assert_called_once_with()
def test_mock_object_class_direct(mocker):
first_class = tests.sample.code.tested_module.FirstClass
generated_mocks_class = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
first_class,
name='first_class',
mock_modules=False,
mock_functions=True,
mock_builtin=False,
mock_classes=False,
mock_referenced_classes=False,
mock_classes_static=False)
exec(generated_mocks_class)
first_class_instance = first_class(42)
first_class_instance.not_implemented('some param')
first_class_instance.not_implemented.assert_called_once_with('some param')
def test_mock_object_class_indirect(mocker):
first_class = tests.sample.code.tested_module.FirstClass
generated_mocks_class = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
first_class,
name='first_class',
mock_modules=False,
mock_functions=True,
mock_builtin=False,
mock_classes=False,
mock_referenced_classes=False,
mock_classes_static=False)
exec(generated_mocks_class)
first_class_instance = tests.sample.code.tested_module.FirstClass(42)
first_class_instance.not_implemented('some param')
first_class_instance.not_implemented.assert_called_once_with('some param')
@pytest.mark.parametrize('static', [True, False])
def test_generate_mocks_mocked_class_equals_to_module(static):
module = tests.sample.code.second_module
single_class = module.SingleClassInModule
generated_mocks_module = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
module,
name='some_name',
mock_modules=False,
mock_functions=False,
mock_builtin=False,
mock_classes=True,
mock_referenced_classes=False,
mock_classes_static=static)
generated_mocks_class = mock_autogen.generator.generate_mocks(
mock_autogen.generator.MockingFramework.PYTEST_MOCK,
single_class,
name='some_name',
mock_modules=False,
mock_functions=False,
mock_builtin=False,
mock_classes=True,
mock_referenced_classes=False,
mock_classes_static=static)
assert generated_mocks_module == generated_mocks_class
def test_generate_mocks_invalid_framework():
with pytest.raises(ValueError):
mock_autogen.generator.generate_mocks('unittest', tests.sample.code)
def test_generate_asserts_invalid_object():
with pytest.raises(TypeError):
mock_autogen.generator.generate_asserts('not a mock')
def test__single_call_to_generate_asserts():
assert "mock_autogen.generate_asserts(mock_name, name='mock_name')\n" == \
mock_autogen.generator._single_call_to_generate_asserts("mock_name")
@pytest.mark.parametrize('prepare_asserts_calls', [True, False])
def test__pytest_mock_dependencies_generate_no_functions(
prepare_asserts_calls):
generated = mock_autogen.generator._pytest_mock_dependencies_generate(
set(), prepare_asserts_calls)
assert "" == generated
@pytest.mark.parametrize('prepare_asserts_calls', [True, False])
def test__pytest_mock_dependencies_generate_one_function(
prepare_asserts_calls):
expected = """# mocked dependencies
mock_first_function = mocker.MagicMock(name='first_function')
mocker.patch('one.object.first_function', new=mock_first_function)
"""
if prepare_asserts_calls:
expected += """# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_first_function, name='mock_first_function')
"""
generated = mock_autogen.generator._pytest_mock_dependencies_generate(
[('one.object', 'first_function')], prepare_asserts_calls)
assert expected == generated
@pytest.mark.parametrize('prepare_asserts_calls', [True, False])
def test__pytest_mock_dependencies_generate_two_functions(
prepare_asserts_calls):
expected = """# mocked dependencies
mock_first_function = mocker.MagicMock(name='first_function')
mocker.patch('one.object.first_function', new=mock_first_function)
mock_second_function = mocker.MagicMock(name='second_function')
mocker.patch('second.object.second_function', new=mock_second_function)
"""
if prepare_asserts_calls:
expected += """# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_first_function, name='mock_first_function')
mock_autogen.generate_asserts(mock_second_function, name='mock_second_function')
"""
generated = mock_autogen.generator._pytest_mock_dependencies_generate(
[('one.object', 'first_function'),
('second.object', 'second_function')], prepare_asserts_calls)
assert expected == generated
@pytest.mark.parametrize('prepare_asserts_calls', [True, False])
def test__pytest_mock_dependencies_generate_two_functions_duplicate(
prepare_asserts_calls):
expected = """# mocked dependencies
mock_first_function = mocker.MagicMock(name='first_function')
mocker.patch('one.object.first_function', new=mock_first_function)
mock_first_function_2 = mocker.MagicMock(name='first_function_2')
mocker.patch('second.object.first_function', new=mock_first_function_2)
"""
if prepare_asserts_calls:
expected += """# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_first_function, name='mock_first_function')
mock_autogen.generate_asserts(mock_first_function_2, name='mock_first_function_2')
"""
generated = mock_autogen.generator._pytest_mock_dependencies_generate(
[('one.object', 'first_function'),
('second.object', 'first_function')], prepare_asserts_calls)
assert expected == generated
@pytest.mark.parametrize('prepare_asserts_calls', [True, False])
def test__pytest_mock_dependencies_generate_four_functions_duplicate(
prepare_asserts_calls):
expected = """# mocked dependencies
mock_first_function = mocker.MagicMock(name='first_function')
mocker.patch('one.object.first_function', new=mock_first_function)
mock_second_function = mocker.MagicMock(name='second_function')
mocker.patch('second.object.second_function', new=mock_second_function)
mock_first_function_2 = mocker.MagicMock(name='first_function_2')
mocker.patch('third.sub.module.first_function', new=mock_first_function_2)
mock_first_function_3 = mocker.MagicMock(name='first_function_3')
mocker.patch('fourth.first_function', new=mock_first_function_3)
"""
if prepare_asserts_calls:
expected += """# calls to generate_asserts, put this after the 'act'
import mock_autogen
mock_autogen.generate_asserts(mock_first_function, name='mock_first_function')
mock_autogen.generate_asserts(mock_second_function, name='mock_second_function')
mock_autogen.generate_asserts(mock_first_function_2, name='mock_first_function_2')
mock_autogen.generate_asserts(mock_first_function_3, name='mock_first_function_3')
"""
generated = mock_autogen.generator._pytest_mock_dependencies_generate(
[('one.object', 'first_function'),
('second.object', 'second_function'),
('third.sub.module', 'first_function'), ('fourth', 'first_function')],
prepare_asserts_calls)
assert expected == generated
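# Usage pattern exercised by the tests above (summary comment; sample names
# such as tests.sample.code.tested_module are the fixtures used throughout):
#   1. generate_mocks(MockingFramework.PYTEST_MOCK, <module/function/object>)
#      returns pytest-mock setup code as a string, which the tests exec().
#   2. the code under test is called (the "act" step).
#   3. generate_asserts(<mock>, name='<mock name>') returns assertion code
#      reflecting the calls the mock recorded.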
|
1628687
|
from unittest import TestCase
import sys
sys.path.append("./PathPlanning/RRTDubins/")
sys.path.append("./PathPlanning/DubinsPath/")
try:
from PathPlanning.RRTDubins import rrt_dubins as m
# from RRTDubins import rrt_dubins as m
except:
raise
print(__file__)
class Test(TestCase):
def test1(self):
m.show_animation = False
m.main()
|
1628718
|
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
from os.path import join
from aequilibrae.paths import NetworkSkimming, SkimResults
from ..common_tools.auxiliary_functions import *
from ..common_tools.global_parameters import *
from ..common_tools.worker_thread import WorkerThread
class TSPProcedure(WorkerThread):
def __init__(self, parentThread, graph, depot, vehicles):
WorkerThread.__init__(self, parentThread)
self.graph = graph
self.depot = depot
self.vehicles = vehicles
self.error = None
self.mult = 100
self.report = []
self.node_sequence = []
def doWork(self):
res = SkimResults()
res.prepare(self.graph)
ns = NetworkSkimming(self.graph, res)
ns.execute()
skm = res.skims
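        # scale the skim matrix and cast to int; OR-Tools routing works with integer arc costs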
mat = (skm.get_matrix(skm.names[0]) * self.mult).astype(int)
self.depot = list(skm.index).index(self.depot)
# Create the routing index manager.
manager = pywrapcp.RoutingIndexManager(mat.shape[0], self.vehicles, self.depot)
# Create Routing Model.
routing = pywrapcp.RoutingModel(manager)
def distance_callback(from_index, to_index):
"""Returns the distance between the two nodes."""
# Convert from routing variable Index to distance matrix NodeIndex.
from_node = manager.IndexToNode(from_index)
to_node = manager.IndexToNode(to_index)
return mat[from_node, to_node]
transit_callback_index = routing.RegisterTransitCallback(distance_callback)
# Define cost of each arc.
routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
# Setting first solution heuristic.
search_parameters = pywrapcp.DefaultRoutingSearchParameters()
search_parameters.first_solution_strategy = (routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
# Solve the problem.
solution = routing.SolveWithParameters(search_parameters)
# Print solution on console.
if not solution:
self.error = 'Solution not found'
self.report.append(self.error)
else:
self.report.append(f'Objective function value: {solution.ObjectiveValue() / self.mult}')
index = routing.Start(0)
plan_output = 'Route:\n'
route_distance = 0
while not routing.IsEnd(index):
p = skm.index[manager.IndexToNode(index)]
self.node_sequence.append(p)
plan_output += f' {p} ->'
previous_index = index
index = solution.Value(routing.NextVar(index))
route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
p = skm.index[manager.IndexToNode(index)]
self.node_sequence.append(p)
plan_output += f' {p}\n'
self.report.append(plan_output)
self.finished_threaded_procedure.emit("TSP")
|
1628722
|
import torch
from data.data_transforms import ifft2, fft2, complex_abs
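# Sanity check: flipping an image along its spatial axes only permutes samples,
# so the summed magnitude of its 2D FFT should be unchanged; all three
# comparisons below are expected to print True.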
image = torch.rand(10, 20, 30, 2)
lr_flip = torch.flip(image, dims=[-2])
ud_flip = torch.flip(image, dims=[-3])
all_flip = torch.flip(image, dims=[-3, -2])
kspace = fft2(image)
lr_kspace = fft2(lr_flip)
ud_kspace = fft2(ud_flip)
all_kspace = fft2(all_flip)
absolute = torch.sum(complex_abs(kspace))
lr_abs = torch.sum(complex_abs(lr_kspace))
ud_abs = torch.sum(complex_abs(ud_kspace))
all_abs = torch.sum(complex_abs(all_kspace))
a = torch.allclose(absolute, lr_abs)
b = torch.allclose(absolute, ud_abs)
c = torch.allclose(absolute, all_abs)
print(a, b, c)
|
1628725
|
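# Binary-layout constants for the SAS7BDAT file format: the magic header bytes
# below are followed by byte offsets/lengths of header fields, page/subheader
# structures, compression signatures and encoding codes.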
magic = (b"\x00\x00\x00\x00\x00\x00\x00\x00" +
b"\x00\x00\x00\x00\xc2\xea\x81\x60" +
b"\xb3\x14\x11\xcf\xbd\x92\x08\x00" +
b"\x09\xc7\x31\x8c\x18\x1f\x10\x11")
align_1_checker_value = b'3'
align_1_offset = 32
align_1_length = 1
align_1_value = 4
u64_byte_checker_value = b'3'
align_2_offset = 35
align_2_length = 1
align_2_value = 4
endianness_offset = 37
endianness_length = 1
platform_offset = 39
platform_length = 1
encoding_offset = 70
encoding_length = 1
dataset_offset = 92
dataset_length = 64
file_type_offset = 156
file_type_length = 8
date_created_offset = 164
date_created_length = 8
date_modified_offset = 172
date_modified_length = 8
header_size_offset = 196
header_size_length = 4
page_size_offset = 200
page_size_length = 4
page_count_offset = 204
page_count_length = 4
sas_release_offset = 216
sas_release_length = 8
sas_server_type_offset = 224
sas_server_type_length = 16
os_version_number_offset = 240
os_version_number_length = 16
os_maker_offset = 256
os_maker_length = 16
os_name_offset = 272
os_name_length = 16
page_bit_offset_x86 = 16
page_bit_offset_x64 = 32
subheader_pointer_length_x86 = 12
subheader_pointer_length_x64 = 24
page_type_offset = 0
page_type_length = 2
block_count_offset = 2
block_count_length = 2
subheader_count_offset = 4
subheader_count_length = 2
page_meta_type = 0
page_data_type = 256
page_amd_type = 1024
page_metc_type = 16384
page_comp_type = -28672
page_mix_types = [512, 640]
subheader_pointers_offset = 8
truncated_subheader_id = 1
compressed_subheader_id = 4
compressed_subheader_type = 1
text_block_size_length = 2
row_length_offset_multiplier = 5
row_count_offset_multiplier = 6
col_count_p1_multiplier = 9
col_count_p2_multiplier = 10
row_count_on_mix_page_offset_multiplier = 15
column_name_pointer_length = 8
column_name_text_subheader_offset = 0
column_name_text_subheader_length = 2
column_name_offset_offset = 2
column_name_offset_length = 2
column_name_length_offset = 4
column_name_length_length = 2
column_data_offset_offset = 8
column_data_length_offset = 8
column_data_length_length = 4
column_type_offset = 14
column_type_length = 1
column_format_text_subheader_index_offset = 22
column_format_text_subheader_index_length = 2
column_format_offset_offset = 24
column_format_offset_length = 2
column_format_length_offset = 26
column_format_length_length = 2
column_label_text_subheader_index_offset = 28
column_label_text_subheader_index_length = 2
column_label_offset_offset = 30
column_label_offset_length = 2
column_label_length_offset = 32
column_label_length_length = 2
rle_compression = b'SASYZCRL'
rdc_compression = b'SASYZCR2'
compression_literals = [rle_compression, rdc_compression]
# Incomplete list of encodings, using SAS nomenclature:
# http://support.sas.com/documentation/cdl/en/nlsref/61893/HTML/default/viewer.htm#a002607278.htm
encoding_names = {29: "latin1", 20: "utf-8", 33: "cyrillic", 60: "wlatin2",
61: "wcyrillic", 62: "wlatin1", 90: "ebcdic870"}
class index:
rowSizeIndex = 0
columnSizeIndex = 1
subheaderCountsIndex = 2
columnTextIndex = 3
columnNameIndex = 4
columnAttributesIndex = 5
formatAndLabelIndex = 6
columnListIndex = 7
dataSubheaderIndex = 8
subheader_signature_to_index = {
b"\xF7\xF7\xF7\xF7": index.rowSizeIndex,
b"\x00\x00\x00\x00\xF7\xF7\xF7\xF7": index.rowSizeIndex,
b"\xF7\xF7\xF7\xF7\x00\x00\x00\x00": index.rowSizeIndex,
b"\xF7\xF7\xF7\xF7\xFF\xFF\xFB\xFE": index.rowSizeIndex,
b"\xF6\xF6\xF6\xF6": index.columnSizeIndex,
b"\x00\x00\x00\x00\xF6\xF6\xF6\xF6": index.columnSizeIndex,
b"\xF6\xF6\xF6\xF6\x00\x00\x00\x00": index.columnSizeIndex,
b"\xF6\xF6\xF6\xF6\xFF\xFF\xFB\xFE": index.columnSizeIndex,
b"\x00\xFC\xFF\xFF": index.subheaderCountsIndex,
b"\xFF\xFF\xFC\x00": index.subheaderCountsIndex,
b"\x00\xFC\xFF\xFF\xFF\xFF\xFF\xFF": index.subheaderCountsIndex,
b"\xFF\xFF\xFF\xFF\xFF\xFF\xFC\x00": index.subheaderCountsIndex,
b"\xFD\xFF\xFF\xFF": index.columnTextIndex,
b"\xFF\xFF\xFF\xFD": index.columnTextIndex,
b"\xFD\xFF\xFF\xFF\xFF\xFF\xFF\xFF": index.columnTextIndex,
b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD": index.columnTextIndex,
b"\xFF\xFF\xFF\xFF": index.columnNameIndex,
b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF": index.columnNameIndex,
b"\xFC\xFF\xFF\xFF": index.columnAttributesIndex,
b"\xFF\xFF\xFF\xFC": index.columnAttributesIndex,
b"\xFC\xFF\xFF\xFF\xFF\xFF\xFF\xFF": index.columnAttributesIndex,
b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC": index.columnAttributesIndex,
b"\xFE\xFB\xFF\xFF": index.formatAndLabelIndex,
b"\xFF\xFF\xFB\xFE": index.formatAndLabelIndex,
b"\xFE\xFB\xFF\xFF\xFF\xFF\xFF\xFF": index.formatAndLabelIndex,
b"\xFF\xFF\xFF\xFF\xFF\xFF\xFB\xFE": index.formatAndLabelIndex,
b"\xFE\xFF\xFF\xFF": index.columnListIndex,
b"\xFF\xFF\xFF\xFE": index.columnListIndex,
b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF": index.columnListIndex,
b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE": index.columnListIndex}
|
1628734
|
from __future__ import (absolute_import, print_function, unicode_literals)
from jodel_api.protos import mcs_pb2
from jodel_api.protos import checkin_pb2
from jodel_api.gcmhack import AndroidAccount
from jodel_api.jodel_api import *
|
1628738
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
# Family instances arrive from the Dynamo input port and must be unwrapped to native Revit elements.
faminstances = UnwrapElement(IN[0])
elementlist = list()
TransactionManager.Instance.EnsureInTransaction(doc)
for item in faminstances:
    try:
        # Flip the hand orientation of the family instance.
        item.flipHand()
        elementlist.append(item)
    except:
        # Instances that cannot be flipped are represented by an empty list in the output.
        elementlist.append(list())
TransactionManager.Instance.TransactionTaskDone()
OUT = elementlist
|
1628743
|
from PyPowerStore.utils import constants
from PyPowerStore.tests.unit_tests.base_test import TestBase
from PyPowerStore.utils.exception import PowerStoreException
import mock
class TestVolume(TestBase):
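    """Unit tests for PyPowerStore volume provisioning and protection operations."""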
def test_get_volumes(self):
vol_list = self.provisioning.get_volumes()
self.assertListEqual(vol_list, self.data.volume_list)
def test_get_volume_detail(self):
vol_details = self.provisioning.get_volume_details(self.data.vol_id1)
self.assertEqual(vol_details, self.data.volume1)
def test_create_volume(self):
vol = self.provisioning.create_volume(self.data.vol_name1,
self.data.size)
self.assertIsNone(vol)
def test_modify_volume(self):
vol = self.provisioning.modify_volume(self.data.vol_id1,
self.data.vol_name1)
self.assertIsNone(vol)
def test_delete_volume(self):
vol = self.provisioning.delete_volume(self.data.vol_id1)
self.assertIsNone(vol)
def test_get_volumes_with_filter(self):
querystring = {'name': 'ilike.*test*'}
querystring.update(constants.SELECT_ID_AND_NAME)
with mock.patch.object(self.provisioning.client,
'request') as mock_request:
self.provisioning.get_volumes(filter_dict=querystring,
all_pages=True)
mock_request.assert_called_with(
constants.GET,
constants.GET_VOLUME_LIST_URL.format(
self.provisioning.server_ip),
all_pages=True,
payload=None,
querystring=querystring)
def test_add_protection_policy_for_volume(self):
resp = self.provisioning.add_protection_policy_for_volume(
self.data.vol_id1, self.data.pol_id)
self.assertIsNone(resp)
def test_add_invalid_protection_policy_for_volume(self):
self.assertRaises(PowerStoreException,
self.provisioning.add_protection_policy_for_volume,
self.data.vol_id1,
self.data.invalid_pol_id)
def test_remove_protection_policy_for_volume(self):
resp = self.provisioning.remove_protection_policy_for_volume(
self.data.vol_id1)
self.assertIsNone(resp)
def test_get_volume_by_name(self):
vol_details = self.provisioning.get_volume_by_name(
self.data.vol_name1)
self.assertEqual(vol_details, [self.data.volume1])
def test_map_volume_to_host(self):
resp = self.provisioning.map_volume_to_host(
self.data.vol_id1, self.data.host_id1, self.data.lun)
self.assertIsNone(resp)
def test_unmap_volume_from_host(self):
resp = self.provisioning.unmap_volume_from_host(
self.data.vol_id1, self.data.host_id1)
self.assertIsNone(resp)
def test_map_volume_to_hg(self):
resp = self.provisioning.map_volume_to_host_group(
self.data.vol_id1, self.data.hg_id1, self.data.lun)
self.assertIsNone(resp)
def test_unmap_volume_from_host_group(self):
resp = self.provisioning.unmap_volume_from_host_group(
self.data.vol_id1, self.data.hg_id1)
self.assertIsNone(resp)
def test_create_volume_snapshot(self):
vol_snap_detail = self.protection.create_volume_snapshot(
self.data.vol_id1, description='vol snap description')
self.assertEqual(vol_snap_detail, self.data.vol_snap_detail)
def test_get_volume_snapshots(self):
snap = self.protection.get_volume_snapshots(self.data.vol_id1)
self.assertListEqual(snap, self.data.volume_snap_list)
def test_get_volume_snapshot_details(self):
snap = self.protection.get_volume_snapshot_details(
self.data.vol_snap_id)
self.assertEqual(snap, self.data.vol_snap_detail)
def test_modify_volume_snapshot(self):
snap = self.protection.modify_volume_snapshot(
self.data.vol_snap_id, name='vol_snap')
self.assertEqual(snap, self.data.vol_snap_detail)
def test_delete_volume_snapshot(self):
resp = self.protection.delete_volume_snapshot(self.data.vol_snap_id)
self.assertIsNone(resp)
|
1628804
|
from abc import ABC, abstractmethod
from typing import Any
class IDisplay(ABC):
    """Abstract display interface; concrete implementations must provide both methods."""
    @abstractmethod
    def Markdown(self, data: str) -> None:
        ...
    @abstractmethod
    def display(self, value: Any) -> None:
        ...
|